Linux Kernel 3.7.1
dev_mtd.c
/*
 * fs/logfs/dev_mtd.c - Device access methods for MTD
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <[email protected]>
 */
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
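
/* For example, with PAGE_SIZE == 4096, PAGE_OFS(0x3010) == 0x10: the macro
 * masks off the page-aligned part and keeps the offset within the page.
 */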

static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
		void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, ofs, len, &retlen, buf);
	BUG_ON(ret == -EINVAL);
	if (ret)
		return ret;

	/* Not sure if we should loop instead. */
	if (retlen != len)
		return -EIO;

	return 0;
}
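
/*
 * Besides hard failures, mtd_read() can return -EUCLEAN (bitflips were
 * corrected by ECC) or -EBADMSG (uncorrectable ECC error).  Both are passed
 * through to the caller; logfs_mtd_readpage() below treats them as non-fatal.
 */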

static int logfs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
		void *buf)
{
	struct logfs_super *super = logfs_super(sb);
	struct mtd_info *mtd = super->s_mtd;
	size_t retlen;
	loff_t page_start, page_end;
	int ret;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
	BUG_ON(len > PAGE_CACHE_SIZE);
	page_start = ofs & PAGE_CACHE_MASK;
	page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
	ret = mtd_write(mtd, ofs, len, &retlen, buf);
	if (ret || (retlen != len))
		return -EIO;

	return 0;
}
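
/*
 * The second BUG_ON above is an alignment check: it is equivalent to
 * BUG_ON(ofs & ((1 << super->s_writeshift) - 1)) and fires whenever the
 * write does not start on a device write-unit boundary.
 */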

/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface lacking the first driver to actually use the
 * asynchronous properties. So just to prevent the first implementor of such
 * a thing from breaking logfs in 2350, we do the usual pointless dance to
 * declare a completion variable and wait for completion before returning
 * from logfs_mtd_erase(). What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
	complete((struct completion *)ei->priv);
}
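
/* ei->priv carries the completion that logfs_mtd_erase() declares on its
 * stack; the callback simply wakes the thread blocked in wait_for_completion().
 */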

static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
		size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	pgoff_t index = ofs >> PAGE_SHIFT;

	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
		page = find_get_page(mapping, index);
		if (!page)
			continue;
		memset(page_address(page), 0xFF, PAGE_SIZE);
		page_cache_release(page);
	}
	return 0;
}
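
/*
 * Filling the cached pages with 0xFF mirrors what the physical erase just
 * did to the flash: erased NAND/NOR reads back as all-ones, so the page
 * cache stays coherent with the medium without a re-read.
 */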

static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
		int ensure_write)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;

	BUG_ON(len % mtd->erasesize);
	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = len;
	ei.callback = logfs_erase_callback;
	ei.priv = (long)&complete;
	ret = mtd_erase(mtd, &ei);
	if (ret)
		return -EIO;

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;
	return logfs_mtd_erase_mapping(sb, ofs, len);
}
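
/*
 * erase_info.state is set by the driver before the callback runs:
 * MTD_ERASE_DONE on success, MTD_ERASE_FAILED otherwise, so checking it
 * after wait_for_completion() is sufficient to detect a failed erase.
 */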

static void logfs_mtd_sync(struct super_block *sb)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;

	mtd_sync(mtd);
}

static int logfs_mtd_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	int err;

	err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
			page_address(page));
	if (err == -EUCLEAN || err == -EBADMSG) {
		/* -EBADMSG happens regularly on power failures */
		err = 0;
		/* FIXME: force GC this segment */
	}
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	*ofs = 0;
	while (mtd_block_isbad(mtd, *ofs)) {
		*ofs += mtd->erasesize;
		if (*ofs >= mtd->size)
			return NULL;
	}
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}

static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	*ofs = mtd->size - mtd->erasesize;
	while (mtd_block_isbad(mtd, *ofs)) {
		*ofs -= mtd->erasesize;
		if (*ofs <= 0)
			return NULL;
	}
	*ofs = *ofs + mtd->erasesize - 0x1000;
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
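
/*
 * The 0x1000 above places the superblock candidate in the last 4KiB of the
 * last good erase block; both finders skip bad blocks and hand the
 * page-aligned offset back to the caller through *ofs.
 */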

static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	int i, err;

	for (i = 0; i < nr_pages; i++) {
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);

		err = logfs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
				page_address(page));
		unlock_page(page);
		page_cache_release(page);
		if (err)
			return err;
	}
	return 0;
}

static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return;

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
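
/*
 * Worked example: with 4KiB pages, ofs == 0x1100 and len == 0x200 give
 * head == 0x100, so the call is widened to ofs == 0x1000, len == 0x1000:
 * whole pages are written even when the payload straddles page edges.
 */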

static void logfs_mtd_put_device(struct logfs_super *s)
{
	put_mtd_device(s->s_mtd);
}

static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
	struct logfs_super *super = logfs_super(sb);
	void *buf;
	int err;

	buf = kmalloc(super->s_writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
	if (err)
		goto out;
	if (memchr_inv(buf, 0xff, super->s_writesize))
		err = -EIO;
out:
	kfree(buf);
	return err;
}
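
/*
 * memchr_inv() returns NULL iff every byte equals 0xff, i.e. the area is
 * still in the erased state and can be written without an erase cycle.
 */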

static const struct logfs_device_ops mtd_devops = {
	.find_first_sb	= logfs_mtd_find_first_sb,
	.find_last_sb	= logfs_mtd_find_last_sb,
	.readpage	= logfs_mtd_readpage,
	.writeseg	= logfs_mtd_writeseg,
	.erase		= logfs_mtd_erase,
	.can_write_buf	= logfs_mtd_can_write_buf,
	.sync		= logfs_mtd_sync,
	.put_device	= logfs_mtd_put_device,
};

int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
{
	struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	s->s_bdev = NULL;
	s->s_mtd = mtd;
	s->s_devops = &mtd_devops;
	return 0;
}
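
#if 0
/*
 * Minimal caller sketch, compiled out: this is not part of the original
 * file, and example_parse_mtd_name() is a hypothetical helper.  It shows
 * how a mount path could map a "mtdN" device string to the device number
 * that logfs_get_sb_mtd() expects.
 */
static int example_parse_mtd_name(struct logfs_super *s, const char *devname)
{
	int mtdnr;

	if (sscanf(devname, "mtd%d", &mtdnr) != 1)
		return -EINVAL;
	return logfs_get_sb_mtd(s, mtdnr);
}
#endif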