Linux Kernel 3.7.1
fs/jffs2/wbuf.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  * Copyright © 2004 Thomas Gleixner <[email protected]>
6  *
7  * Created by David Woodhouse <[email protected]>
8  * Modified, debugged and enhanced by Thomas Gleixner <[email protected]>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22 #include <linux/sched.h>
23 #include <linux/writeback.h>
24 
25 #include "nodelist.h"
26 
27 /* For testing write failures */
28 #undef BREAKME
29 #undef BREAKMEHEADER
30 
31 #ifdef BREAKME
32 static unsigned char *brokenbuf;
33 #endif
34 
35 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
36 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
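/* Worked example (illustrative values): with c->wbuf_pagesize == 2048
 * (0x800), PAGE_DIV(0x1a34) == 0x1800 and PAGE_MOD(0x1a34) == 0x234,
 * i.e. the start of the write-buffer page containing the address and
 * the offset within that page. */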
37 
38 /* max. erase failures before we mark a block bad */
39 #define MAX_ERASE_FAILURES 2
40 
41 struct jffs2_inodirty {
42  struct jffs2_inodirty *next;
43  uint32_t ino;
44 };
45 
46 static struct jffs2_inodirty inodirty_nomem;
47 
48 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
49 {
50  struct jffs2_inodirty *this = c->wbuf_inodes;
51 
52  /* If a malloc failed, consider _everything_ dirty */
53  if (this == &inodirty_nomem)
54  return 1;
55 
56  /* If ino == 0, _any_ non-GC writes mean 'yes' */
57  if (this && !ino)
58  return 1;
59 
60  /* Look to see if the inode in question is pending in the wbuf */
61  while (this) {
62  if (this->ino == ino)
63  return 1;
64  this = this->next;
65  }
66  return 0;
67 }
68 
69 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
70 {
71  struct jffs2_inodirty *this;
72 
73  this = c->wbuf_inodes;
74 
75  if (this != &inodirty_nomem) {
76  while (this) {
77  struct jffs2_inodirty *next = this->next;
78  kfree(this);
79  this = next;
80  }
81  }
82  c->wbuf_inodes = NULL;
83 }
84 
85 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
86 {
87  struct jffs2_inodirty *new;
88 
89  /* Schedule delayed write-buffer write-out */
90  jffs2_dirty_trigger(c);
91 
92  if (jffs2_wbuf_pending_for_ino(c, ino))
93  return;
94 
95  new = kmalloc(sizeof(*new), GFP_KERNEL);
96  if (!new) {
97  jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
98  jffs2_clear_wbuf_ino_list(c);
99  c->wbuf_inodes = &inodirty_nomem;
100  return;
101  }
102  new->ino = ino;
103  new->next = c->wbuf_inodes;
104  c->wbuf_inodes = new;
105  return;
106 }
107 
108 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
109 {
110  struct list_head *this, *next;
111  static int n;
112 
113  if (list_empty(&c->erasable_pending_wbuf_list))
114  return;
115 
117  struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
118 
119  jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
120  jeb->offset);
121  list_del(this);
122  if ((jiffies + (n++)) & 127) {
123  /* Most of the time, we just erase it immediately. Otherwise we
124  spend ages scanning it on mount, etc. */
125  jffs2_dbg(1, "...and adding to erase_pending_list\n");
126  list_add_tail(&jeb->list, &c->erase_pending_list);
127  c->nr_erasing_blocks++;
128  jffs2_garbage_collect_trigger(c);
129  } else {
130  /* Sometimes, however, we leave it elsewhere so it doesn't get
131  immediately reused, and we spread the load a bit. */
132  jffs2_dbg(1, "...and adding to erasable_list\n");
133  list_add_tail(&jeb->list, &c->erasable_list);
134  }
135  }
136 }
137 
138 #define REFILE_NOTEMPTY 0
139 #define REFILE_ANYWAY 1
140 
141 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
142 {
143  jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
144 
145  /* File the existing block on the bad_used_list.... */
146  if (c->nextblock == jeb)
147  c->nextblock = NULL;
148  else /* Not sure this should ever happen... need more coffee */
149  list_del(&jeb->list);
150  if (jeb->first_node) {
151  jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
152  jeb->offset);
153  list_add(&jeb->list, &c->bad_used_list);
154  } else {
155  BUG_ON(allow_empty == REFILE_NOTEMPTY);
156  /* It has to have had some nodes or we couldn't be here */
157  jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
158  jeb->offset);
159  list_add(&jeb->list, &c->erase_pending_list);
160  c->nr_erasing_blocks++;
161  jffs2_garbage_collect_trigger(c);
162  }
163 
164  if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
165  uint32_t oldfree = jeb->free_size;
166 
167  jffs2_link_node_ref(c, jeb,
168  (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
169  oldfree, NULL);
170  /* convert to wasted */
171  c->wasted_size += oldfree;
172  jeb->wasted_size += oldfree;
173  c->dirty_size -= oldfree;
174  jeb->dirty_size -= oldfree;
175  }
176 
177  jffs2_dbg_dump_block_lists_nolock(c);
178  jffs2_dbg_acct_sanity_check_nolock(c,jeb);
179  jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
180 }
181 
182 static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
183  struct jffs2_inode_info *f,
184  struct jffs2_raw_node_ref *raw,
185  union jffs2_node_union *node)
186 {
187  struct jffs2_node_frag *frag;
188  struct jffs2_full_dirent *fd;
189 
190  dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
191  node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
192 
193  BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
194  je16_to_cpu(node->u.magic) != 0);
195 
196  switch (je16_to_cpu(node->u.nodetype)) {
197  case JFFS2_NODETYPE_INODE:
198  if (f->metadata && f->metadata->raw == raw) {
199  dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
200  return &f->metadata->raw;
201  }
202  frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
203  BUG_ON(!frag);
204  /* Find a frag which refers to the full_dnode we want to modify */
205  while (!frag->node || frag->node->raw != raw) {
206  frag = frag_next(frag);
207  BUG_ON(!frag);
208  }
209  dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
210  return &frag->node->raw;
211 
212  case JFFS2_NODETYPE_DIRENT:
213  for (fd = f->dents; fd; fd = fd->next) {
214  if (fd->raw == raw) {
215  dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
216  return &fd->raw;
217  }
218  }
219  BUG();
220 
221  default:
222  dbg_noderef("Don't care about replacing raw for nodetype %x\n",
223  je16_to_cpu(node->u.nodetype));
224  break;
225  }
226  return NULL;
227 }
228 
229 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
230 static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
231  uint32_t ofs)
232 {
233  int ret;
234  size_t retlen;
235  char *eccstr;
236 
237  ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
238  if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
239  pr_warn("%s(): Read back of page at %08x failed: %d\n",
240  __func__, c->wbuf_ofs, ret);
241  return ret;
242  } else if (retlen != c->wbuf_pagesize) {
243  pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
244  __func__, ofs, retlen, c->wbuf_pagesize);
245  return -EIO;
246  }
247  if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
248  return 0;
249 
250  if (ret == -EUCLEAN)
251  eccstr = "corrected";
252  else if (ret == -EBADMSG)
253  eccstr = "correction failed";
254  else
255  eccstr = "OK or unused";
256 
257  pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
258  eccstr, c->wbuf_ofs);
259  print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
260  c->wbuf, c->wbuf_pagesize, 0);
261 
262  pr_warn("Read back:\n");
263  print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
264  c->wbuf_verify, c->wbuf_pagesize, 0);
265 
266  return -EIO;
267 }
268 #else
269 #define jffs2_verify_write(c,b,o) (0)
270 #endif
271 
272 /* Recover from failure to write wbuf. Recover the nodes up to the
273  * wbuf, not the one which we were starting to try to write. */
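/* In outline (as implemented below): refile the failing block, find the
 * first non-obsolete node at risk, read back any partially written data,
 * reserve space in a fresh block via the GC allocator, rewrite the data
 * there, and move the raw node refs over to the new block. */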
274 
275 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
276 {
277  struct jffs2_eraseblock *jeb, *new_jeb;
278  struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
279  size_t retlen;
280  int ret;
281  int nr_refile = 0;
282  unsigned char *buf;
283  uint32_t start, end, ofs, len;
284 
285  jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
286 
287  spin_lock(&c->erase_completion_lock);
288  if (c->wbuf_ofs % c->mtd->erasesize)
289  jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
290  else
291  jffs2_block_refile(c, jeb, REFILE_ANYWAY);
292  spin_unlock(&c->erase_completion_lock);
293 
294  BUG_ON(!ref_obsolete(jeb->last_node));
295 
296  /* Find the first node to be recovered, by skipping over every
297  node which ends before the wbuf starts, or which is obsolete. */
298  for (next = raw = jeb->first_node; next; raw = next) {
299  next = ref_next(raw);
300 
301  if (ref_obsolete(raw) ||
302  (next && ref_offset(next) <= c->wbuf_ofs)) {
303  dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
304  ref_offset(raw), ref_flags(raw),
305  (ref_offset(raw) + ref_totlen(c, jeb, raw)),
306  c->wbuf_ofs);
307  continue;
308  }
309  dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
310  ref_offset(raw), ref_flags(raw),
311  (ref_offset(raw) + ref_totlen(c, jeb, raw)));
312 
313  first_raw = raw;
314  break;
315  }
316 
317  if (!first_raw) {
318  /* All nodes were obsolete. Nothing to recover. */
319  jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
320  c->wbuf_len = 0;
321  return;
322  }
323 
324  start = ref_offset(first_raw);
325  end = ref_offset(jeb->last_node);
326  nr_refile = 1;
327 
328  /* Count the number of refs which need to be copied */
329  while ((raw = ref_next(raw)) != jeb->last_node)
330  nr_refile++;
331 
332  dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
333  start, end, end - start, nr_refile);
334 
335  buf = NULL;
336  if (start < c->wbuf_ofs) {
337  /* First affected node was already partially written.
338  * Attempt to reread the old data into our buffer. */
339 
340  buf = kmalloc(end - start, GFP_KERNEL);
341  if (!buf) {
342  pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
343 
344  goto read_failed;
345  }
346 
347  /* Do the read... */
348  ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
349  buf);
350 
351  /* ECC recovered ? */
352  if ((ret == -EUCLEAN || ret == -EBADMSG) &&
353  (retlen == c->wbuf_ofs - start))
354  ret = 0;
355 
356  if (ret || retlen != c->wbuf_ofs - start) {
357  pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
358 
359  kfree(buf);
360  buf = NULL;
361  read_failed:
362  first_raw = ref_next(first_raw);
363  nr_refile--;
364  while (first_raw && ref_obsolete(first_raw)) {
365  first_raw = ref_next(first_raw);
366  nr_refile--;
367  }
368 
369  /* If this was the only node to be recovered, give up */
370  if (!first_raw) {
371  c->wbuf_len = 0;
372  return;
373  }
374 
375  /* It wasn't. Go on and try to recover nodes complete in the wbuf */
376  start = ref_offset(first_raw);
377  dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
378  start, end, end - start, nr_refile);
379 
380  } else {
381  /* Read succeeded. Copy the remaining data from the wbuf */
382  memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
383  }
384  }
385  /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
386  Either 'buf' contains the data, or we find it in the wbuf */
387 
388  /* ... and get an allocation of space from a shiny new block instead */
389  ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
390  if (ret) {
391  pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
392  kfree(buf);
393  return;
394  }
395 
396  /* The summary is not recovered, so it must be disabled for this erase block */
397  jffs2_sum_disable_collecting(c->summary);
398 
399  ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
400  if (ret) {
401  pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
402  kfree(buf);
403  return;
404  }
405 
406  ofs = write_ofs(c);
407 
408  if (end-start >= c->wbuf_pagesize) {
409  /* Need to do another write immediately, but it's possible
410  that this is just because the wbuf itself is completely
411  full, and there's nothing earlier read back from the
412  flash. Hence 'buf' isn't necessarily what we're writing
413  from. */
414  unsigned char *rewrite_buf = buf?:c->wbuf;
415  uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
416 
417  jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
418  towrite, ofs);
419 
420 #ifdef BREAKMEHEADER
421  static int breakme;
422  if (breakme++ == 20) {
423  pr_notice("Faking write error at 0x%08x\n", ofs);
424  breakme = 0;
425  mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
426  ret = -EIO;
427  } else
428 #endif
429  ret = mtd_write(c->mtd, ofs, towrite, &retlen,
430  rewrite_buf);
431 
432  if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
433  /* Argh. We tried. Really we did. */
434  pr_crit("Recovery of wbuf failed due to a second write error\n");
435  kfree(buf);
436 
437  if (retlen)
438  jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
439 
440  return;
441  }
442  pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
443 
444  c->wbuf_len = (end - start) - towrite;
445  c->wbuf_ofs = ofs + towrite;
446  memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
447  /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
448  } else {
449  /* OK, now we're left with the dregs in whichever buffer we're using */
450  if (buf) {
451  memcpy(c->wbuf, buf, end-start);
452  } else {
453  memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
454  }
455  c->wbuf_ofs = ofs;
456  c->wbuf_len = end - start;
457  }
458 
459  /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
460  new_jeb = &c->blocks[ofs / c->sector_size];
461 
462  spin_lock(&c->erase_completion_lock);
463  for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
464  uint32_t rawlen = ref_totlen(c, jeb, raw);
465  struct jffs2_inode_cache *ic;
466  struct jffs2_raw_node_ref *new_ref;
467  struct jffs2_raw_node_ref **adjust_ref = NULL;
468  struct jffs2_inode_info *f = NULL;
469 
470  jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
471  rawlen, ref_offset(raw), ref_flags(raw), ofs);
472 
473  ic = jffs2_raw_ref_to_ic(raw);
474 
475  /* Ick. This XATTR mess should be fixed shortly... */
476  if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
477  struct jffs2_xattr_datum *xd = (void *)ic;
478  BUG_ON(xd->node != raw);
479  adjust_ref = &xd->node;
480  raw->next_in_ino = NULL;
481  ic = NULL;
482  } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
483  struct jffs2_xattr_ref *xr = (void *)ic;
484  BUG_ON(xr->node != raw);
485  adjust_ref = &xr->node;
486  raw->next_in_ino = NULL;
487  ic = NULL;
488  } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
489  struct jffs2_raw_node_ref **p = &ic->nodes;
490 
491  /* Remove the old node from the per-inode list */
492  while (*p && *p != (void *)ic) {
493  if (*p == raw) {
494  (*p) = (raw->next_in_ino);
495  raw->next_in_ino = NULL;
496  break;
497  }
498  p = &((*p)->next_in_ino);
499  }
500 
501  if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
502  /* If it's an in-core inode, then we have to adjust any
503  full_dirent or full_dnode structure to point to the
504  new version instead of the old */
505  f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
506  if (IS_ERR(f)) {
507  /* Should never happen; it _must_ be present */
508  JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
509  ic->ino, PTR_ERR(f));
510  BUG();
511  }
512  /* We don't lock f->sem. There's a number of ways we could
513  end up in here with it already being locked, and nobody's
514  going to modify it on us anyway because we hold the
515  alloc_sem. We're only changing one ->raw pointer too,
516  which we can get away with without upsetting readers. */
517  adjust_ref = jffs2_incore_replace_raw(c, f, raw,
518  (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
519  } else if (unlikely(ic->state != INO_STATE_PRESENT &&
520  ic->state != INO_STATE_CHECKEDABSENT &&
521  ic->state != INO_STATE_GC)) {
522  JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
523  BUG();
524  }
525  }
526 
527  new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
528 
529  if (adjust_ref) {
530  BUG_ON(*adjust_ref != raw);
531  *adjust_ref = new_ref;
532  }
533  if (f)
534  jffs2_gc_release_inode(c, f);
535 
536  if (!ref_obsolete(raw)) {
537  jeb->dirty_size += rawlen;
538  jeb->used_size -= rawlen;
539  c->dirty_size += rawlen;
540  c->used_size -= rawlen;
541  raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
542  BUG_ON(raw->next_in_ino);
543  }
544  ofs += rawlen;
545  }
546 
547  kfree(buf);
548 
549  /* Fix up the original jeb now it's on the bad_list */
550  if (first_raw == jeb->first_node) {
551  jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
552  jeb->offset);
553  list_move(&jeb->list, &c->erase_pending_list);
554  c->nr_erasing_blocks++;
555  jffs2_garbage_collect_trigger(c);
556  }
557 
558  jffs2_dbg_acct_sanity_check_nolock(c, jeb);
559  jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
560 
561  jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
562  jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
563 
564  spin_unlock(&c->erase_completion_lock);
565 
566  jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
567  c->wbuf_ofs, c->wbuf_len);
568 
569 }
570 
571 /* Meaning of pad argument:
572  0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
573  1: Pad, do not adjust nextblock free_size
574  2: Pad, adjust nextblock free_size
575 */
576 #define NOPAD 0
577 #define PAD_NOACCOUNT 1
578 #define PAD_ACCOUNTING 2
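/* As used in this file: jffs2_flash_writev() flushes with NOPAD when the
 * buffer fills on a page boundary and with PAD_NOACCOUNT when switching
 * blocks; jffs2_flush_wbuf_pad() pads with PAD_NOACCOUNT; and
 * jffs2_flush_wbuf_gc() pads with PAD_ACCOUNTING when garbage collection
 * cannot make progress or fails. */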
579 
580 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
581 {
582  struct jffs2_eraseblock *wbuf_jeb;
583  int ret;
584  size_t retlen;
585 
586  /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
587  del_timer() the timer we never initialised. */
588  if (!jffs2_is_writebuffered(c))
589  return 0;
590 
591  if (!mutex_is_locked(&c->alloc_sem)) {
592  pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
593  BUG();
594  }
595 
596  if (!c->wbuf_len) /* already checked c->wbuf above */
597  return 0;
598 
599  wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
600  if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
601  return -ENOMEM;
602 
603  /* Claim the remaining space on the page.
604  This happens if we have a switch to a new block,
605  or if fsync forces us to flush the writebuffer.
606  If we have switched to the next page, we will not
607  have enough remaining space for this.
608  */
609  if (pad) {
610  c->wbuf_len = PAD(c->wbuf_len);
611 
612  /* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
613  with 8 byte page size */
614  memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
615 
616  if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
617  struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
618  padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
619  padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
620  padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
621  padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
622  }
623  }
624  /* else jffs2_flash_writev has actually filled in the rest of the
625  buffer for us, and will deal with the node refs etc. later. */
626 
627 #ifdef BREAKME
628  static int breakme;
629  if (breakme++ == 20) {
630  pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
631  breakme = 0;
632  mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
633  brokenbuf);
634  ret = -EIO;
635  } else
636 #endif
637 
638  ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
639  &retlen, c->wbuf);
640 
641  if (ret) {
642  pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
643  goto wfail;
644  } else if (retlen != c->wbuf_pagesize) {
645  pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
646  retlen, c->wbuf_pagesize);
647  ret = -EIO;
648  goto wfail;
649  } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
650  wfail:
651  jffs2_wbuf_recover(c);
652 
653  return ret;
654  }
655 
656  /* Adjust free size of the block if we padded. */
657  if (pad) {
658  uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
659 
660  jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
661  (wbuf_jeb == c->nextblock) ? "next" : "",
662  wbuf_jeb->offset);
663 
664  /* wbuf_pagesize - wbuf_len is the amount of space that's to be
665  padded. If there is less free space in the block than that,
666  something screwed up */
667  if (wbuf_jeb->free_size < waste) {
668  pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
669  c->wbuf_ofs, c->wbuf_len, waste);
670  pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
671  wbuf_jeb->offset, wbuf_jeb->free_size);
672  BUG();
673  }
674 
675  spin_lock(&c->erase_completion_lock);
676 
677  jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
678  /* FIXME: that made it count as dirty. Convert to wasted */
679  wbuf_jeb->dirty_size -= waste;
680  c->dirty_size -= waste;
681  wbuf_jeb->wasted_size += waste;
682  c->wasted_size += waste;
683  } else
684  spin_lock(&c->erase_completion_lock);
685 
686  /* Stick any now-obsoleted blocks on the erase_pending_list */
687  jffs2_refile_wbuf_blocks(c);
688  jffs2_clear_wbuf_ino_list(c);
689  spin_unlock(&c->erase_completion_lock);
690 
691  memset(c->wbuf,0xff,c->wbuf_pagesize);
692  /* adjust write buffer offset, else we get a non-contiguous write bug */
693  c->wbuf_ofs += c->wbuf_pagesize;
694  c->wbuf_len = 0;
695  return 0;
696 }
697 
698 /* Trigger garbage collection to flush the write-buffer.
699  If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
700  outstanding. If ino arg non-zero, do it only if a write for the
701  given inode is outstanding. */
702 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
703 {
704  uint32_t old_wbuf_ofs;
705  uint32_t old_wbuf_len;
706  int ret = 0;
707 
708  jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
709 
710  if (!c->wbuf)
711  return 0;
712 
713  mutex_lock(&c->alloc_sem);
714  if (!jffs2_wbuf_pending_for_ino(c, ino)) {
715  jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
716  mutex_unlock(&c->alloc_sem);
717  return 0;
718  }
719 
720  old_wbuf_ofs = c->wbuf_ofs;
721  old_wbuf_len = c->wbuf_len;
722 
723  if (c->unchecked_size) {
724  /* GC won't make any progress for a while */
725  jffs2_dbg(1, "%s(): padding. Not finished checking\n",
726  __func__);
727  down_write(&c->wbuf_sem);
728  ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
729  /* retry flushing wbuf in case jffs2_wbuf_recover
730  left some data in the wbuf */
731  if (ret)
732  ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
733  up_write(&c->wbuf_sem);
734  } else while (old_wbuf_len &&
735  old_wbuf_ofs == c->wbuf_ofs) {
736 
737  mutex_unlock(&c->alloc_sem);
738 
739  jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
740 
740 
741  ret = jffs2_garbage_collect_pass(c);
742  if (ret) {
743  /* GC failed. Flush it with padding instead */
744  mutex_lock(&c->alloc_sem);
745  down_write(&c->wbuf_sem);
746  ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
747  /* retry flushing wbuf in case jffs2_wbuf_recover
748  left some data in the wbuf */
749  if (ret)
750  ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
751  up_write(&c->wbuf_sem);
752  break;
753  }
754  mutex_lock(&c->alloc_sem);
755  }
756 
757  jffs2_dbg(1, "%s(): ends...\n", __func__);
758 
759  mutex_unlock(&c->alloc_sem);
760  return ret;
761 }
762 
763 /* Pad write-buffer to end and write it, wasting space. */
764 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
765 {
766  int ret;
767 
768  if (!c->wbuf)
769  return 0;
770 
771  down_write(&c->wbuf_sem);
772  ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
773  /* retry - maybe wbuf recover left some data in wbuf. */
774  if (ret)
775  ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
776  up_write(&c->wbuf_sem);
777 
778  return ret;
779 }
780 
781 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
782  size_t len)
783 {
784  if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
785  return 0;
786 
787  if (len > (c->wbuf_pagesize - c->wbuf_len))
788  len = c->wbuf_pagesize - c->wbuf_len;
789  memcpy(c->wbuf + c->wbuf_len, buf, len);
790  c->wbuf_len += (uint32_t) len;
791  return len;
792 }
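/* Note (summary of the helper above): jffs2_fill_wbuf() returns 0 when the
 * buffer is empty and the request covers at least a full page, telling the
 * caller to write those pages directly and bypass the write-buffer;
 * otherwise it returns the number of bytes absorbed into c->wbuf. */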
793 
794 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
795  unsigned long count, loff_t to, size_t *retlen,
796  uint32_t ino)
797 {
798  struct jffs2_eraseblock *jeb;
799  size_t wbuf_retlen, donelen = 0;
800  uint32_t outvec_to = to;
801  int ret, invec;
802 
803  /* If not writebuffered flash, don't bother */
804  if (!jffs2_is_writebuffered(c))
805  return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
806 
807  down_write(&c->wbuf_sem);
808 
809  /* If wbuf_ofs is not initialized, set it to target address */
810  if (c->wbuf_ofs == 0xFFFFFFFF) {
811  c->wbuf_ofs = PAGE_DIV(to);
812  c->wbuf_len = PAGE_MOD(to);
813  memset(c->wbuf,0xff,c->wbuf_pagesize);
814  }
815 
816  /*
817  * Sanity checks on target address. It's permitted to write
818  * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
819  * write at the beginning of a new erase block. Anything else,
820  * and you die. New block starts at xxx000c (0-b = block
821  * header)
822  */
823  if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
824  /* It's a write to a new block */
825  if (c->wbuf_len) {
826  jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
827  __func__, (unsigned long)to, c->wbuf_ofs);
828  ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
829  if (ret)
830  goto outerr;
831  }
832  /* set pointer to new block */
833  c->wbuf_ofs = PAGE_DIV(to);
834  c->wbuf_len = PAGE_MOD(to);
835  }
836 
837  if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
838  /* We're not writing immediately after the writebuffer. Bad. */
839  pr_crit("%s(): Non-contiguous write to %08lx\n",
840  __func__, (unsigned long)to);
841  if (c->wbuf_len)
842  pr_crit("wbuf was previously %08x-%08x\n",
843  c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
844  BUG();
845  }
846 
847  /* adjust alignment offset */
848  if (c->wbuf_len != PAGE_MOD(to)) {
849  c->wbuf_len = PAGE_MOD(to);
850  /* take care of alignment to next page */
851  if (!c->wbuf_len) {
852  c->wbuf_len = c->wbuf_pagesize;
853  ret = __jffs2_flush_wbuf(c, NOPAD);
854  if (ret)
855  goto outerr;
856  }
857  }
858 
859  for (invec = 0; invec < count; invec++) {
860  int vlen = invecs[invec].iov_len;
861  uint8_t *v = invecs[invec].iov_base;
862 
863  wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
864 
865  if (c->wbuf_len == c->wbuf_pagesize) {
866  ret = __jffs2_flush_wbuf(c, NOPAD);
867  if (ret)
868  goto outerr;
869  }
870  vlen -= wbuf_retlen;
871  outvec_to += wbuf_retlen;
872  donelen += wbuf_retlen;
873  v += wbuf_retlen;
874 
875  if (vlen >= c->wbuf_pagesize) {
876  ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
877  &wbuf_retlen, v);
878  if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
879  goto outfile;
880 
881  vlen -= wbuf_retlen;
882  outvec_to += wbuf_retlen;
883  c->wbuf_ofs = outvec_to;
884  donelen += wbuf_retlen;
885  v += wbuf_retlen;
886  }
887 
888  wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
889  if (c->wbuf_len == c->wbuf_pagesize) {
890  ret = __jffs2_flush_wbuf(c, NOPAD);
891  if (ret)
892  goto outerr;
893  }
894 
895  outvec_to += wbuf_retlen;
896  donelen += wbuf_retlen;
897  }
898 
899  /*
900  * If there's a remainder in the wbuf and it's a non-GC write,
901  * remember that the wbuf affects this ino
902  */
903  *retlen = donelen;
904 
905  if (jffs2_sum_active()) {
906  int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
907  if (res)
908  return res;
909  }
910 
911  if (c->wbuf_len && ino)
912  jffs2_wbuf_dirties_inode(c, ino);
913 
914  ret = 0;
915  up_write(&c->wbuf_sem);
916  return ret;
917 
918 outfile:
919  /*
920  * At this point we have no problem; c->wbuf is empty. However,
921  * refile nextblock to avoid writing again to the same address.
922  */
923 
924  spin_lock(&c->erase_completion_lock);
925 
926  jeb = &c->blocks[outvec_to / c->sector_size];
927  jffs2_block_refile(c, jeb, REFILE_ANYWAY);
928 
929  spin_unlock(&c->erase_completion_lock);
930 
931 outerr:
932  *retlen = 0;
933  up_write(&c->wbuf_sem);
934  return ret;
935 }
936 
937 /*
938  * This is the entry point for flash writes.
939  * Check if we are working on NAND flash; if so, build a kvec and write it via writev.
940 */
941 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
942  size_t *retlen, const u_char *buf)
943 {
944  struct kvec vecs[1];
945 
946  if (!jffs2_is_writebuffered(c))
947  return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
948 
949  vecs[0].iov_base = (unsigned char *) buf;
950  vecs[0].iov_len = len;
951  return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
952 }
953 
954 /*
955  Handle readback from writebuffer and ECC failure return
956 */
957 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
958 {
959  loff_t orbf = 0, owbf = 0, lwbf = 0;
960  int ret;
961 
962  if (!jffs2_is_writebuffered(c))
963  return mtd_read(c->mtd, ofs, len, retlen, buf);
964 
965  /* Read flash */
966  down_read(&c->wbuf_sem);
967  ret = mtd_read(c->mtd, ofs, len, retlen, buf);
968 
969  if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
970  if (ret == -EBADMSG)
971  pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
972  len, ofs);
973  /*
974  * We have the raw data without ECC correction in the buffer;
975  * maybe we are lucky and all data or parts are correct. We
976  * check the node. If the data are corrupted, the node check
977  * will sort it out. We keep this block; it will fail on write
978  * or erase and then we mark it bad. Or should we do that now?
979  * But we should give it a chance. Maybe we had a system crash
980  * or power loss before the ECC write or an erase was completed.
981  * So we return success. :)
982  */
983  ret = 0;
984  }
985 
986  /* if no writebuffer available or write buffer empty, return */
987  if (!c->wbuf_pagesize || !c->wbuf_len)
988  goto exit;
989 
990  /* if we read in a different block, return */
991  if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
992  goto exit;
993 
994  if (ofs >= c->wbuf_ofs) {
995  owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
996  if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
997  goto exit;
998  lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
999  if (lwbf > len)
1000  lwbf = len;
1001  } else {
1002  orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
1003  if (orbf > len) /* does the read end before the write buffer? */
1004  goto exit;
1005  lwbf = len - orbf; /* number of bytes to copy */
1006  if (lwbf > c->wbuf_len)
1007  lwbf = c->wbuf_len;
1008  }
1009  if (lwbf > 0)
1010  memcpy(buf+orbf,c->wbuf+owbf,lwbf);
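/* Worked example (illustrative values): with wbuf_ofs = 0x1000 and
 * wbuf_len = 0x200, a read of 0x400 bytes at ofs 0x1080 yields owbf = 0x80,
 * orbf = 0 and lwbf = 0x180, so the freshest 0x180 bytes are taken from the
 * write-buffer rather than from flash. */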
1011 
1012 exit:
1013  up_read(&c->wbuf_sem);
1014  return ret;
1015 }
1016 
1017 #define NR_OOB_SCAN_PAGES 4
1018 
1019 /* For historical reasons we use only 8 bytes for OOB clean marker */
1020 #define OOB_CM_SIZE 8
1021 
1022 static const struct jffs2_unknown_node oob_cleanmarker =
1023 {
1024  .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
1025  .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
1026  .totlen = constant_cpu_to_je32(8)
1027 };
1028 
1029 /*
1030  * Check if the out-of-band area is empty. This function knows about the clean
1031  * marker and, if it is present in OOB, treats the OOB as empty anyway.
1032  */
1033 int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1034  struct jffs2_eraseblock *jeb, int mode)
1035 {
1036  int i, ret;
1037  int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1038  struct mtd_oob_ops ops;
1039 
1040  ops.mode = MTD_OPS_AUTO_OOB;
1041  ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
1042  ops.oobbuf = c->oobbuf;
1043  ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1044  ops.datbuf = NULL;
1045 
1046  ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1047  if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
1048  pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1049  jeb->offset, ops.ooblen, ops.oobretlen, ret);
1050  if (!ret || mtd_is_bitflip(ret))
1051  ret = -EIO;
1052  return ret;
1053  }
1054 
1055  for(i = 0; i < ops.ooblen; i++) {
1056  if (mode && i < cmlen)
1057  /* Yeah, we know about the cleanmarker */
1058  continue;
1059 
1060  if (ops.oobbuf[i] != 0xFF) {
1061  jffs2_dbg(2, "Found %02x at %x in OOB for "
1062  "%08x\n", ops.oobbuf[i], i, jeb->offset);
1063  return 1;
1064  }
1065  }
1066 
1067  return 0;
1068 }
1069 
1070 /*
1071  * Check for a valid cleanmarker.
1072  * Returns: 0 if a valid cleanmarker was found
1073  * 1 if no cleanmarker was found
1074  * negative error code if an error occurred
1075  */
1076 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1077  struct jffs2_eraseblock *jeb)
1078 {
1079  struct mtd_oob_ops ops;
1080  int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1081 
1082  ops.mode = MTD_OPS_AUTO_OOB;
1083  ops.ooblen = cmlen;
1084  ops.oobbuf = c->oobbuf;
1085  ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1086  ops.datbuf = NULL;
1087 
1088  ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1089  if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
1090  pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1091  jeb->offset, ops.ooblen, ops.oobretlen, ret);
1092  if (!ret || mtd_is_bitflip(ret))
1093  ret = -EIO;
1094  return ret;
1095  }
1096 
1097  return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1098 }
1099 
1100 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1101  struct jffs2_eraseblock *jeb)
1102 {
1103  int ret;
1104  struct mtd_oob_ops ops;
1105  int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1106 
1107  ops.mode = MTD_OPS_AUTO_OOB;
1108  ops.ooblen = cmlen;
1109  ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1110  ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1111  ops.datbuf = NULL;
1112 
1113  ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1114  if (ret || ops.oobretlen != ops.ooblen) {
1115  pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1116  jeb->offset, ops.ooblen, ops.oobretlen, ret);
1117  if (!ret)
1118  ret = -EIO;
1119  return ret;
1120  }
1121 
1122  return 0;
1123 }
1124 
1125 /*
1126  * On NAND we try to mark this block bad. If the block has failed to
1127  * erase more than MAX_ERASE_FAILURES times, we finally mark it bad.
1128  * Don't care about failures. This block remains on the erase-pending
1129  * or badblock list as long as nobody manipulates the flash with
1130  * a bootloader or something like that.
1131  */
1132 
1133 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1134 {
1135  int ret;
1136 
1137  /* if the failure count is still below the maximum, don't mark the block bad yet */
1138  if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1139  return 0;
1140 
1141  pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
1142  ret = mtd_block_markbad(c->mtd, bad_offset);
1143 
1144  if (ret) {
1145  jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
1146  __func__, jeb->offset, ret);
1147  return ret;
1148  }
1149  return 1;
1150 }
1151 
1152 static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
1153 {
1154  struct delayed_work *dwork;
1155 
1156  dwork = container_of(work, struct delayed_work, work);
1157  return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
1158 }
1159 
1160 static void delayed_wbuf_sync(struct work_struct *work)
1161 {
1162  struct jffs2_sb_info *c = work_to_sb(work);
1163  struct super_block *sb = OFNI_BS_2SFFJ(c);
1164 
1165  spin_lock(&c->wbuf_dwork_lock);
1166  c->wbuf_queued = 0;
1167  spin_unlock(&c->wbuf_dwork_lock);
1168 
1169  if (!(sb->s_flags & MS_RDONLY)) {
1170  jffs2_dbg(1, "%s()\n", __func__);
1171  jffs2_flush_wbuf_gc(c, 0);
1172  }
1173 }
1174 
1175 void jffs2_dirty_trigger(struct jffs2_sb_info *c)
1176 {
1177  struct super_block *sb = OFNI_BS_2SFFJ(c);
1178  unsigned long delay;
1179 
1180  if (sb->s_flags & MS_RDONLY)
1181  return;
1182 
1183  spin_lock(&c->wbuf_dwork_lock);
1184  if (!c->wbuf_queued) {
1185  jffs2_dbg(1, "%s()\n", __func__);
1186  delay = msecs_to_jiffies(dirty_writeback_interval * 10);
1187  queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
1188  c->wbuf_queued = 1;
1189  }
1190  spin_unlock(&c->wbuf_dwork_lock);
1191 }
1192 
1193 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1194 {
1195  struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1196 
1197  if (!c->mtd->oobsize)
1198  return 0;
1199 
1200  /* Cleanmarker is out-of-band, so inline size zero */
1201  c->cleanmarker_size = 0;
1202 
1203  if (!oinfo || oinfo->oobavail == 0) {
1204  pr_err("inconsistent device description\n");
1205  return -EINVAL;
1206  }
1207 
1208  jffs2_dbg(1, "using OOB on NAND\n");
1209 
1210  c->oobavail = oinfo->oobavail;
1211 
1212  /* Initialise write buffer */
1213  init_rwsem(&c->wbuf_sem);
1214  spin_lock_init(&c->wbuf_dwork_lock);
1215  INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1216  c->wbuf_pagesize = c->mtd->writesize;
1217  c->wbuf_ofs = 0xFFFFFFFF;
1218 
1219  c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1220  if (!c->wbuf)
1221  return -ENOMEM;
1222 
1223  c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1224  if (!c->oobbuf) {
1225  kfree(c->wbuf);
1226  return -ENOMEM;
1227  }
1228 
1229 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1230  c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1231  if (!c->wbuf_verify) {
1232  kfree(c->oobbuf);
1233  kfree(c->wbuf);
1234  return -ENOMEM;
1235  }
1236 #endif
1237  return 0;
1238 }
1239 
1240 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1241 {
1242 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1243  kfree(c->wbuf_verify);
1244 #endif
1245  kfree(c->wbuf);
1246  kfree(c->oobbuf);
1247 }
1248 
1249 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1250  c->cleanmarker_size = 0; /* No cleanmarkers needed */
1251 
1252  /* Initialize write buffer */
1253  init_rwsem(&c->wbuf_sem);
1254  spin_lock_init(&c->wbuf_dwork_lock);
1255  INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1256  c->wbuf_pagesize = c->mtd->erasesize;
1257 
1258  /* Find a suitable c->sector_size
1259  * - Not too many sectors
1260  * - Sectors have to be at least 4 KiB + some bytes
1261  * - All known dataflashes have erase sizes of 528 or 1056 bytes
1262  * - We take at least 8 eraseblocks and want to have at least 8KiB size
1263  * - The concatenation should be a power of 2
1264  */
1265 
1266  c->sector_size = 8 * c->mtd->erasesize;
1267 
1268  while (c->sector_size < 8192) {
1269  c->sector_size *= 2;
1270  }
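/* Worked example (illustrative): with a 528-byte erasesize, 8 * 528 = 4224
 * is below 8192, so the loop doubles it once to 8448; with 1056 bytes,
 * 8 * 1056 = 8448 already meets the 8KiB minimum. */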
1271 
1272  /* It may be necessary to adjust the flash size */
1273  c->flash_size = c->mtd->size;
1274 
1275  if ((c->flash_size % c->sector_size) != 0) {
1276  c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1277  pr_warn("flash size adjusted to %dKiB\n", c->flash_size / 1024);
1278  }
1279 
1280  c->wbuf_ofs = 0xFFFFFFFF;
1281  c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1282  if (!c->wbuf)
1283  return -ENOMEM;
1284 
1285 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1286  c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1287  if (!c->wbuf_verify) {
1289  kfree(c->wbuf);
1290  return -ENOMEM;
1291  }
1292 #endif
1293 
1294  pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1295  c->wbuf_pagesize, c->sector_size);
1296 
1297  return 0;
1298 }
1299 
1300 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1301 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1302  kfree(c->wbuf_verify);
1303 #endif
1304  kfree(c->wbuf);
1305 }
1306 
1307 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1308  /* Cleanmarker currently occupies whole programming regions,
1309  * either one or two for 8-byte STMicro flashes. */
1310  c->cleanmarker_size = max(16u, c->mtd->writesize);
1311 
1312  /* Initialize write buffer */
1313  init_rwsem(&c->wbuf_sem);
1314  spin_lock_init(&c->wbuf_dwork_lock);
1315  INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1316 
1317  c->wbuf_pagesize = c->mtd->writesize;
1318  c->wbuf_ofs = 0xFFFFFFFF;
1319 
1320  c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1321  if (!c->wbuf)
1322  return -ENOMEM;
1323 
1324 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1325  c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1326  if (!c->wbuf_verify) {
1327  kfree(c->wbuf);
1328  return -ENOMEM;
1329  }
1330 #endif
1331  return 0;
1332 }
1333 
1334 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1335 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1336  kfree(c->wbuf_verify);
1337 #endif
1338  kfree(c->wbuf);
1339 }
1340 
1341 int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1342  c->cleanmarker_size = 0;
1343 
1344  if (c->mtd->writesize == 1)
1345  /* We do not need write-buffer */
1346  return 0;
1347 
1348  init_rwsem(&c->wbuf_sem);
1349  spin_lock_init(&c->wbuf_dwork_lock);
1350  INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1351 
1352  c->wbuf_pagesize = c->mtd->writesize;
1353  c->wbuf_ofs = 0xFFFFFFFF;
1354  c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1355  if (!c->wbuf)
1356  return -ENOMEM;
1357 
1358  pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1359  c->wbuf_pagesize, c->sector_size);
1360 
1361  return 0;
1362 }
1363 
1364 void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
1365  kfree(c->wbuf);
1366 }