Linux Kernel 3.7.1
eba.c
1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12  * the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * Author: Artem Bityutskiy (Битюцкий Артём)
19  */
20 
21 /*
22  * The UBI Eraseblock Association (EBA) sub-system.
23  *
24  * This sub-system is responsible for I/O to/from logical eraseblocks.
25  *
26  * Although in this implementation the EBA table is fully kept and managed in
27  * RAM, which implies poor scalability, it might be (partially) maintained on
28  * flash in future implementations.
29  *
30  * The EBA sub-system implements per-logical eraseblock locking. Before
31  * accessing a logical eraseblock it is locked for reading or writing. The
32  * per-logical eraseblock locking is implemented by means of the lock tree. The
33  * lock tree is an RB-tree which refers to all the currently locked logical
34  * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
35  * They are indexed by (@vol_id, @lnum) pairs.
36  *
37  * EBA also maintains the global sequence counter which is incremented each
38  * time a logical eraseblock is mapped to a physical eraseblock and it is
39  * stored in the volume identifier header. This means that each VID header has
40  * a unique sequence number. The sequence number is only increased and we assume
41  * 64 bits is enough to never overflow.
42  */
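
The composite (@vol_id, @lnum) ordering described above can be summed up in a few lines. The sketch below is purely illustrative: the helper name ltree_key_cmp is hypothetical and does not exist in this file; the real comparisons are open-coded in ltree_lookup() and ltree_add_entry() further down.

/* Illustrative sketch only: lock-tree entries compare by volume ID first,
 * then by logical eraseblock number. The helper name is hypothetical. */
static inline int ltree_key_cmp(int vol_id1, int lnum1, int vol_id2, int lnum2)
{
	if (vol_id1 != vol_id2)
		return vol_id1 < vol_id2 ? -1 : 1;
	if (lnum1 != lnum2)
		return lnum1 < lnum2 ? -1 : 1;
	return 0; /* same logical eraseblock */
}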
43 
44 #include <linux/slab.h>
45 #include <linux/crc32.h>
46 #include <linux/err.h>
47 #include "ubi.h"
48 
49 /* Number of physical eraseblocks reserved for atomic LEB change operation */
50 #define EBA_RESERVED_PEBS 1
51 
60 unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
61 {
62  unsigned long long sqnum;
63 
64  spin_lock(&ubi->ltree_lock);
65  sqnum = ubi->global_sqnum++;
66  spin_unlock(&ubi->ltree_lock);
67 
68  return sqnum;
69 }
70 
79 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
80 {
81  if (vol_id == UBI_LAYOUT_VOLUME_ID)
82  return UBI_LAYOUT_VOLUME_COMPAT;
83  return 0;
84 }
85 
96 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
97  int lnum)
98 {
99  struct rb_node *p;
100 
101  p = ubi->ltree.rb_node;
102  while (p) {
103  struct ubi_ltree_entry *le;
104 
105  le = rb_entry(p, struct ubi_ltree_entry, rb);
106 
107  if (vol_id < le->vol_id)
108  p = p->rb_left;
109  else if (vol_id > le->vol_id)
110  p = p->rb_right;
111  else {
112  if (lnum < le->lnum)
113  p = p->rb_left;
114  else if (lnum > le->lnum)
115  p = p->rb_right;
116  else
117  return le;
118  }
119  }
120 
121  return NULL;
122 }
123 
135 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
136  int vol_id, int lnum)
137 {
138  struct ubi_ltree_entry *le, *le1, *le_free;
139 
140  le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
141  if (!le)
142  return ERR_PTR(-ENOMEM);
143 
144  le->users = 0;
145  init_rwsem(&le->mutex);
146  le->vol_id = vol_id;
147  le->lnum = lnum;
148 
149  spin_lock(&ubi->ltree_lock);
150  le1 = ltree_lookup(ubi, vol_id, lnum);
151 
152  if (le1) {
153  /*
154  * This logical eraseblock is already locked. The newly
155  * allocated lock entry is not needed.
156  */
157  le_free = le;
158  le = le1;
159  } else {
160  struct rb_node **p, *parent = NULL;
161 
162  /*
163  * No lock entry, add the newly allocated one to the
164  * @ubi->ltree RB-tree.
165  */
166  le_free = NULL;
167 
168  p = &ubi->ltree.rb_node;
169  while (*p) {
170  parent = *p;
171  le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
172 
173  if (vol_id < le1->vol_id)
174  p = &(*p)->rb_left;
175  else if (vol_id > le1->vol_id)
176  p = &(*p)->rb_right;
177  else {
178  ubi_assert(lnum != le1->lnum);
179  if (lnum < le1->lnum)
180  p = &(*p)->rb_left;
181  else
182  p = &(*p)->rb_right;
183  }
184  }
185 
186  rb_link_node(&le->rb, parent, p);
187  rb_insert_color(&le->rb, &ubi->ltree);
188  }
189  le->users += 1;
190  spin_unlock(&ubi->ltree_lock);
191 
192  kfree(le_free);
193  return le;
194 }
195 
205 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
206 {
207  struct ubi_ltree_entry *le;
208 
209  le = ltree_add_entry(ubi, vol_id, lnum);
210  if (IS_ERR(le))
211  return PTR_ERR(le);
212  down_read(&le->mutex);
213  return 0;
214 }
215 
222 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
223 {
224  struct ubi_ltree_entry *le;
225 
226  spin_lock(&ubi->ltree_lock);
227  le = ltree_lookup(ubi, vol_id, lnum);
228  le->users -= 1;
229  ubi_assert(le->users >= 0);
230  up_read(&le->mutex);
231  if (le->users == 0) {
232  rb_erase(&le->rb, &ubi->ltree);
233  kfree(le);
234  }
235  spin_unlock(&ubi->ltree_lock);
236 }
237 
247 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
248 {
249  struct ubi_ltree_entry *le;
250 
251  le = ltree_add_entry(ubi, vol_id, lnum);
252  if (IS_ERR(le))
253  return PTR_ERR(le);
254  down_write(&le->mutex);
255  return 0;
256 }
257 
269 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
270 {
271  struct ubi_ltree_entry *le;
272 
273  le = ltree_add_entry(ubi, vol_id, lnum);
274  if (IS_ERR(le))
275  return PTR_ERR(le);
276  if (down_write_trylock(&le->mutex))
277  return 0;
278 
279  /* Contention, cancel */
280  spin_lock(&ubi->ltree_lock);
281  le->users -= 1;
282  ubi_assert(le->users >= 0);
283  if (le->users == 0) {
284  rb_erase(&le->rb, &ubi->ltree);
285  kfree(le);
286  }
287  spin_unlock(&ubi->ltree_lock);
288 
289  return 1;
290 }
291 
298 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
299 {
300  struct ubi_ltree_entry *le;
301 
302  spin_lock(&ubi->ltree_lock);
303  le = ltree_lookup(ubi, vol_id, lnum);
304  le->users -= 1;
305  ubi_assert(le->users >= 0);
306  up_write(&le->mutex);
307  if (le->users == 0) {
308  rb_erase(&le->rb, &ubi->ltree);
309  kfree(le);
310  }
311  spin_unlock(&ubi->ltree_lock);
312 }
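
The four helpers above implement the locking discipline the I/O paths below rely on: readers take the entry's semaphore shared, writers take it exclusive, and the entry is removed from the tree and freed once its @users count drops to zero. A minimal sketch of the read-side pattern (hypothetical helper name, error handling trimmed):

/* Hypothetical sketch of the read-side pattern used by ubi_eba_read_leb(). */
static int eba_read_pattern_sketch(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err;

	err = leb_read_lock(ubi, vol_id, lnum);	/* may allocate an ltree entry */
	if (err)
		return err;

	/* ... I/O on the PEB this LEB is mapped to would go here ... */

	leb_read_unlock(ubi, vol_id, lnum);	/* frees the entry if we were the last user */
	return 0;
}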
313 
324 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
325  int lnum)
326 {
327  int err, pnum, vol_id = vol->vol_id;
328 
329  if (ubi->ro_mode)
330  return -EROFS;
331 
332  err = leb_write_lock(ubi, vol_id, lnum);
333  if (err)
334  return err;
335 
336  pnum = vol->eba_tbl[lnum];
337  if (pnum < 0)
338  /* This logical eraseblock is already unmapped */
339  goto out_unlock;
340 
341  dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
342 
343  down_read(&ubi->fm_sem);
344  vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
345  up_read(&ubi->fm_sem);
346  err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
347 
348 out_unlock:
349  leb_write_unlock(ubi, vol_id, lnum);
350  return err;
351 }
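
For illustration, a hedged sketch of how a caller that already owns a volume (as the volume-management code does) might un-map a range of LEBs with the function above. The helper name is hypothetical; the actual erasure is deferred to the wear-levelling sub-system via ubi_wl_put_peb().

/* Hypothetical sketch: un-map @count consecutive LEBs starting at @from. */
static int unmap_leb_range_sketch(struct ubi_device *ubi, struct ubi_volume *vol,
				  int from, int count)
{
	int lnum, err;

	for (lnum = from; lnum < from + count; lnum++) {
		err = ubi_eba_unmap_leb(ubi, vol, lnum);	/* no-op if already unmapped */
		if (err)
			return err;
	}
	return 0;
}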
352 
372 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
373  void *buf, int offset, int len, int check)
374 {
375  int err, pnum, scrub = 0, vol_id = vol->vol_id;
376  struct ubi_vid_hdr *vid_hdr;
377  uint32_t uninitialized_var(crc);
378 
379  err = leb_read_lock(ubi, vol_id, lnum);
380  if (err)
381  return err;
382 
383  pnum = vol->eba_tbl[lnum];
384  if (pnum < 0) {
385  /*
386  * The logical eraseblock is not mapped, fill the whole buffer
387  * with 0xFF bytes. The exception is static volumes for which
388  * it is an error to read unmapped logical eraseblocks.
389  */
390  dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
391  len, offset, vol_id, lnum);
392  leb_read_unlock(ubi, vol_id, lnum);
393  ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
394  memset(buf, 0xFF, len);
395  return 0;
396  }
397 
398  dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
399  len, offset, vol_id, lnum, pnum);
400 
401  if (vol->vol_type == UBI_DYNAMIC_VOLUME)
402  check = 0;
403 
404 retry:
405  if (check) {
406  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
407  if (!vid_hdr) {
408  err = -ENOMEM;
409  goto out_unlock;
410  }
411 
412  err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
413  if (err && err != UBI_IO_BITFLIPS) {
414  if (err > 0) {
415  /*
416  * The header is either absent or corrupted.
417  * The former case means there is a bug -
418  * switch to read-only mode just in case.
419  * The latter case means a real corruption - we
420  * may try to recover data. FIXME: but this is
421  * not implemented.
422  */
423  if (err == UBI_IO_BAD_HDR_EBADMSG ||
424  err == UBI_IO_BAD_HDR) {
425  ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
426  pnum, vol_id, lnum);
427  err = -EBADMSG;
428  } else
429  ubi_ro_mode(ubi);
430  }
431  goto out_free;
432  } else if (err == UBI_IO_BITFLIPS)
433  scrub = 1;
434 
435  ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
436  ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
437 
438  crc = be32_to_cpu(vid_hdr->data_crc);
439  ubi_free_vid_hdr(ubi, vid_hdr);
440  }
441 
442  err = ubi_io_read_data(ubi, buf, pnum, offset, len);
443  if (err) {
444  if (err == UBI_IO_BITFLIPS) {
445  scrub = 1;
446  err = 0;
447  } else if (mtd_is_eccerr(err)) {
448  if (vol->vol_type == UBI_DYNAMIC_VOLUME)
449  goto out_unlock;
450  scrub = 1;
451  if (!check) {
452  ubi_msg("force data checking");
453  check = 1;
454  goto retry;
455  }
456  } else
457  goto out_unlock;
458  }
459 
460  if (check) {
461  uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
462  if (crc1 != crc) {
463  ubi_warn("CRC error: calculated %#08x, must be %#08x",
464  crc1, crc);
465  err = -EBADMSG;
466  goto out_unlock;
467  }
468  }
469 
470  if (scrub)
471  err = ubi_wl_scrub_peb(ubi, pnum);
472 
473  leb_read_unlock(ubi, vol_id, lnum);
474  return err;
475 
476 out_free:
477  ubi_free_vid_hdr(ubi, vid_hdr);
478 out_unlock:
479  leb_read_unlock(ubi, vol_id, lnum);
480  return err;
481 }
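
A hedged usage sketch for the read path: reading one whole LEB of a dynamic volume with the function above. The helper name is hypothetical; note that an unmapped LEB simply yields a buffer full of 0xFF bytes, as described in the function.

/* Hypothetical sketch: read an entire LEB of a dynamic volume into @buf.
 * @buf must be at least vol->usable_leb_size bytes long. */
static int read_whole_leb_sketch(struct ubi_device *ubi, struct ubi_volume *vol,
				 int lnum, void *buf)
{
	return ubi_eba_read_leb(ubi, vol, lnum, buf, 0, vol->usable_leb_size, 0);
}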
482 
499 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
500  const void *buf, int offset, int len)
501 {
502  int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
503  struct ubi_volume *vol = ubi->volumes[idx];
504  struct ubi_vid_hdr *vid_hdr;
505 
506  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
507  if (!vid_hdr)
508  return -ENOMEM;
509 
510 retry:
511  new_pnum = ubi_wl_get_peb(ubi);
512  if (new_pnum < 0) {
513  ubi_free_vid_hdr(ubi, vid_hdr);
514  return new_pnum;
515  }
516 
517  ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
518 
519  err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
520  if (err && err != UBI_IO_BITFLIPS) {
521  if (err > 0)
522  err = -EIO;
523  goto out_put;
524  }
525 
526  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
527  err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
528  if (err)
529  goto write_error;
530 
531  data_size = offset + len;
532  mutex_lock(&ubi->buf_mutex);
533  memset(ubi->peb_buf + offset, 0xFF, len);
534 
535  /* Read everything before the area where the write failure happened */
536  if (offset > 0) {
537  err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
538  if (err && err != UBI_IO_BITFLIPS)
539  goto out_unlock;
540  }
541 
542  memcpy(ubi->peb_buf + offset, buf, len);
543 
544  err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
545  if (err) {
546  mutex_unlock(&ubi->buf_mutex);
547  goto write_error;
548  }
549 
550  mutex_unlock(&ubi->buf_mutex);
551  ubi_free_vid_hdr(ubi, vid_hdr);
552 
553  down_read(&ubi->fm_sem);
554  vol->eba_tbl[lnum] = new_pnum;
555  up_read(&ubi->fm_sem);
556  ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
557 
558  ubi_msg("data was successfully recovered");
559  return 0;
560 
561 out_unlock:
562  mutex_unlock(&ubi->buf_mutex);
563 out_put:
564  ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
565  ubi_free_vid_hdr(ubi, vid_hdr);
566  return err;
567 
568 write_error:
569  /*
570  * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
571  * get another one.
572  */
573  ubi_warn("failed to write to PEB %d", new_pnum);
574  ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
575  if (++tries > UBI_IO_RETRIES) {
576  ubi_free_vid_hdr(ubi, vid_hdr);
577  return err;
578  }
579  ubi_msg("try again");
580  goto retry;
581 }
582 
597 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
598  const void *buf, int offset, int len)
599 {
600  int err, pnum, tries = 0, vol_id = vol->vol_id;
601  struct ubi_vid_hdr *vid_hdr;
602 
603  if (ubi->ro_mode)
604  return -EROFS;
605 
606  err = leb_write_lock(ubi, vol_id, lnum);
607  if (err)
608  return err;
609 
610  pnum = vol->eba_tbl[lnum];
611  if (pnum >= 0) {
612  dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
613  len, offset, vol_id, lnum, pnum);
614 
615  err = ubi_io_write_data(ubi, buf, pnum, offset, len);
616  if (err) {
617  ubi_warn("failed to write data to PEB %d", pnum);
618  if (err == -EIO && ubi->bad_allowed)
619  err = recover_peb(ubi, pnum, vol_id, lnum, buf,
620  offset, len);
621  if (err)
622  ubi_ro_mode(ubi);
623  }
624  leb_write_unlock(ubi, vol_id, lnum);
625  return err;
626  }
627 
628  /*
629  * The logical eraseblock is not mapped. We have to get a free physical
630  * eraseblock and write the volume identifier header there first.
631  */
632  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
633  if (!vid_hdr) {
634  leb_write_unlock(ubi, vol_id, lnum);
635  return -ENOMEM;
636  }
637 
638  vid_hdr->vol_type = UBI_VID_DYNAMIC;
639  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
640  vid_hdr->vol_id = cpu_to_be32(vol_id);
641  vid_hdr->lnum = cpu_to_be32(lnum);
642  vid_hdr->compat = ubi_get_compat(ubi, vol_id);
643  vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
644 
645 retry:
646  pnum = ubi_wl_get_peb(ubi);
647  if (pnum < 0) {
648  ubi_free_vid_hdr(ubi, vid_hdr);
649  leb_write_unlock(ubi, vol_id, lnum);
650  return pnum;
651  }
652 
653  dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
654  len, offset, vol_id, lnum, pnum);
655 
656  err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
657  if (err) {
658  ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
659  vol_id, lnum, pnum);
660  goto write_error;
661  }
662 
663  if (len) {
664  err = ubi_io_write_data(ubi, buf, pnum, offset, len);
665  if (err) {
666  ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
667  len, offset, vol_id, lnum, pnum);
668  goto write_error;
669  }
670  }
671 
672  down_read(&ubi->fm_sem);
673  vol->eba_tbl[lnum] = pnum;
674  up_read(&ubi->fm_sem);
675 
676  leb_write_unlock(ubi, vol_id, lnum);
677  ubi_free_vid_hdr(ubi, vid_hdr);
678  return 0;
679 
680 write_error:
681  if (err != -EIO || !ubi->bad_allowed) {
682  ubi_ro_mode(ubi);
683  leb_write_unlock(ubi, vol_id, lnum);
684  ubi_free_vid_hdr(ubi, vid_hdr);
685  return err;
686  }
687 
688  /*
689  * Fortunately, this is the first write operation to this physical
690  * eraseblock, so just put it and request a new one. We assume that if
691  * this physical eraseblock went bad, the erase code will handle that.
692  */
693  err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
694  if (err || ++tries > UBI_IO_RETRIES) {
695  ubi_ro_mode(ubi);
696  leb_write_unlock(ubi, vol_id, lnum);
697  ubi_free_vid_hdr(ubi, vid_hdr);
698  return err;
699  }
700 
701  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
702  ubi_msg("try another PEB");
703  goto retry;
704 }
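
A hedged sketch of the corresponding write path. It assumes, as the UBI kernel API layer checks before calling in here, that the offset and length are aligned to the minimal I/O unit; the helper name is hypothetical.

/* Hypothetical sketch: write @len bytes at the start of a dynamic-volume LEB. */
static int write_leb_start_sketch(struct ubi_device *ubi, struct ubi_volume *vol,
				  int lnum, const void *buf, int len)
{
	if (len & (ubi->min_io_size - 1))
		return -EINVAL;	/* writes must be min_io_size aligned */

	return ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
}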
705 
728 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
729  int lnum, const void *buf, int len, int used_ebs)
730 {
731  int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
732  struct ubi_vid_hdr *vid_hdr;
733  uint32_t crc;
734 
735  if (ubi->ro_mode)
736  return -EROFS;
737 
738  if (lnum == used_ebs - 1)
739  /* If this is the last LEB @len may be unaligned */
740  len = ALIGN(data_size, ubi->min_io_size);
741  else
742  ubi_assert(!(len & (ubi->min_io_size - 1)));
743 
744  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
745  if (!vid_hdr)
746  return -ENOMEM;
747 
748  err = leb_write_lock(ubi, vol_id, lnum);
749  if (err) {
750  ubi_free_vid_hdr(ubi, vid_hdr);
751  return err;
752  }
753 
754  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
755  vid_hdr->vol_id = cpu_to_be32(vol_id);
756  vid_hdr->lnum = cpu_to_be32(lnum);
757  vid_hdr->compat = ubi_get_compat(ubi, vol_id);
758  vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
759 
760  crc = crc32(UBI_CRC32_INIT, buf, data_size);
761  vid_hdr->vol_type = UBI_VID_STATIC;
762  vid_hdr->data_size = cpu_to_be32(data_size);
763  vid_hdr->used_ebs = cpu_to_be32(used_ebs);
764  vid_hdr->data_crc = cpu_to_be32(crc);
765 
766 retry:
767  pnum = ubi_wl_get_peb(ubi);
768  if (pnum < 0) {
769  ubi_free_vid_hdr(ubi, vid_hdr);
770  leb_write_unlock(ubi, vol_id, lnum);
771  return pnum;
772  }
773 
774  dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
775  len, vol_id, lnum, pnum, used_ebs);
776 
777  err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
778  if (err) {
779  ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
780  vol_id, lnum, pnum);
781  goto write_error;
782  }
783 
784  err = ubi_io_write_data(ubi, buf, pnum, 0, len);
785  if (err) {
786  ubi_warn("failed to write %d bytes of data to PEB %d",
787  len, pnum);
788  goto write_error;
789  }
790 
791  ubi_assert(vol->eba_tbl[lnum] < 0);
792  down_read(&ubi->fm_sem);
793  vol->eba_tbl[lnum] = pnum;
794  up_read(&ubi->fm_sem);
795 
796  leb_write_unlock(ubi, vol_id, lnum);
797  ubi_free_vid_hdr(ubi, vid_hdr);
798  return 0;
799 
800 write_error:
801  if (err != -EIO || !ubi->bad_allowed) {
802  /*
803  * This flash device does not admit of bad eraseblocks or
804  * something nasty and unexpected happened. Switch to read-only
805  * mode just in case.
806  */
807  ubi_ro_mode(ubi);
808  leb_write_unlock(ubi, vol_id, lnum);
809  ubi_free_vid_hdr(ubi, vid_hdr);
810  return err;
811  }
812 
813  err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
814  if (err || ++tries > UBI_IO_RETRIES) {
815  ubi_ro_mode(ubi);
816  leb_write_unlock(ubi, vol_id, lnum);
817  ubi_free_vid_hdr(ubi, vid_hdr);
818  return err;
819  }
820 
821  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
822  ubi_msg("try another PEB");
823  goto retry;
824 }
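
The last-LEB handling above boils down to simple padding arithmetic; as a worked example, assuming a hypothetical min_io_size of 2048 bytes:

/*
 * Worked example of the last-LEB padding above, assuming min_io_size = 2048:
 * a final chunk of data_size = 5000 bytes keeps data_size = 5000 in the VID
 * header (and the CRC covers exactly those 5000 bytes), while the on-flash
 * write length becomes len = ALIGN(5000, 2048) = 6144 bytes.
 */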
825 
826 /*
827  * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
828  * @ubi: UBI device description object
829  * @vol: volume description object
830  * @lnum: logical eraseblock number
831  * @buf: data to write
832  * @len: how many bytes to write
833  *
834  * This function changes the contents of a logical eraseblock atomically. @buf
835  * has to contain new logical eraseblock data, and @len - the length of the
836  * data, which has to be aligned. This function guarantees that in case of an
837  * unclean reboot the old contents is preserved. Returns zero in case of
838  * success and a negative error code in case of failure.
839  *
840  * UBI reserves one LEB for the "atomic LEB change" operation, so only one
841  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
842  */
843 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
844  int lnum, const void *buf, int len)
845 {
846  int err, pnum, tries = 0, vol_id = vol->vol_id;
847  struct ubi_vid_hdr *vid_hdr;
848  uint32_t crc;
849 
850  if (ubi->ro_mode)
851  return -EROFS;
852 
853  if (len == 0) {
854  /*
855  * Special case when data length is zero. In this case the LEB
856  * has to be unmapped and mapped somewhere else.
857  */
858  err = ubi_eba_unmap_leb(ubi, vol, lnum);
859  if (err)
860  return err;
861  return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
862  }
863 
864  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
865  if (!vid_hdr)
866  return -ENOMEM;
867 
868  mutex_lock(&ubi->alc_mutex);
869  err = leb_write_lock(ubi, vol_id, lnum);
870  if (err)
871  goto out_mutex;
872 
873  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
874  vid_hdr->vol_id = cpu_to_be32(vol_id);
875  vid_hdr->lnum = cpu_to_be32(lnum);
876  vid_hdr->compat = ubi_get_compat(ubi, vol_id);
877  vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
878 
879  crc = crc32(UBI_CRC32_INIT, buf, len);
880  vid_hdr->vol_type = UBI_VID_DYNAMIC;
881  vid_hdr->data_size = cpu_to_be32(len);
882  vid_hdr->copy_flag = 1;
883  vid_hdr->data_crc = cpu_to_be32(crc);
884 
885 retry:
886  pnum = ubi_wl_get_peb(ubi);
887  if (pnum < 0) {
888  err = pnum;
889  goto out_leb_unlock;
890  }
891 
892  dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
893  vol_id, lnum, vol->eba_tbl[lnum], pnum);
894 
895  err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
896  if (err) {
897  ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
898  vol_id, lnum, pnum);
899  goto write_error;
900  }
901 
902  err = ubi_io_write_data(ubi, buf, pnum, 0, len);
903  if (err) {
904  ubi_warn("failed to write %d bytes of data to PEB %d",
905  len, pnum);
906  goto write_error;
907  }
908 
909  if (vol->eba_tbl[lnum] >= 0) {
910  err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
911  if (err)
912  goto out_leb_unlock;
913  }
914 
915  down_read(&ubi->fm_sem);
916  vol->eba_tbl[lnum] = pnum;
917  up_read(&ubi->fm_sem);
918 
919 out_leb_unlock:
920  leb_write_unlock(ubi, vol_id, lnum);
921 out_mutex:
922  mutex_unlock(&ubi->alc_mutex);
923  ubi_free_vid_hdr(ubi, vid_hdr);
924  return err;
925 
926 write_error:
927  if (err != -EIO || !ubi->bad_allowed) {
928  /*
929  * This flash device does not admit of bad eraseblocks or
930  * something nasty and unexpected happened. Switch to read-only
931  * mode just in case.
932  */
933  ubi_ro_mode(ubi);
934  goto out_leb_unlock;
935  }
936 
937  err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
938  if (err || ++tries > UBI_IO_RETRIES) {
939  ubi_ro_mode(ubi);
940  goto out_leb_unlock;
941  }
942 
943  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
944  ubi_msg("try another PEB");
945  goto retry;
946 }
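
A hedged usage sketch for the atomic path: replacing a LEB's contents in one shot. As the comment above requires, the length has to be aligned; the helper name is hypothetical, and @ubi->alc_mutex serialises concurrent atomic changes internally.

/* Hypothetical sketch: atomically replace the contents of one LEB.
 * After an unclean reboot a reader sees either the old or the new data. */
static int replace_leb_sketch(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	if (len & (ubi->min_io_size - 1))
		return -EINVAL;	/* length must be aligned, see comment above */

	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}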
947 
967 static int is_error_sane(int err)
968 {
969  if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
970  err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
971  return 0;
972  return 1;
973 }
974 
989 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
990  struct ubi_vid_hdr *vid_hdr)
991 {
992  int err, vol_id, lnum, data_size, aldata_size, idx;
993  struct ubi_volume *vol;
994  uint32_t crc;
995 
996  vol_id = be32_to_cpu(vid_hdr->vol_id);
997  lnum = be32_to_cpu(vid_hdr->lnum);
998 
999  dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
1000 
1001  if (vid_hdr->vol_type == UBI_VID_STATIC) {
1002  data_size = be32_to_cpu(vid_hdr->data_size);
1003  aldata_size = ALIGN(data_size, ubi->min_io_size);
1004  } else
1005  data_size = aldata_size =
1006  ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1007 
1008  idx = vol_id2idx(ubi, vol_id);
1009  spin_lock(&ubi->volumes_lock);
1010  /*
1011  * Note, we may race with volume deletion, which means that the volume
1012  * this logical eraseblock belongs to might be being deleted. Since the
1013  * volume deletion un-maps all the volume's logical eraseblocks, it will
1014  * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
1015  */
1016  vol = ubi->volumes[idx];
1017  spin_unlock(&ubi->volumes_lock);
1018  if (!vol) {
1019  /* No need to do further work, cancel */
1020  dbg_wl("volume %d is being removed, cancel", vol_id);
1021  return MOVE_CANCEL_RACE;
1022  }
1023 
1024  /*
1025  * We do not want anybody to write to this logical eraseblock while we
1026  * are moving it, so lock it.
1027  *
1028  * Note, we are using non-waiting locking here, because we cannot sleep
1029  * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1030  * unmapping the LEB which is mapped to the PEB we are going to move
1031  * (@from). This task locks the LEB and goes sleep in the
1032  * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1033  * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1034  * LEB is already locked, we just do not move it and return
1035  * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1036  * we do not know the reasons of the contention - it may be just a
1037  * normal I/O on this LEB, so we want to re-try.
1038  */
1039  err = leb_write_trylock(ubi, vol_id, lnum);
1040  if (err) {
1041  dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1042  return MOVE_RETRY;
1043  }
1044 
1045  /*
1046  * The LEB might have been put meanwhile, and the task which put it is
1047  * probably waiting on @ubi->move_mutex. No need to continue the work,
1048  * cancel it.
1049  */
1050  if (vol->eba_tbl[lnum] != from) {
1051  dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1052  vol_id, lnum, from, vol->eba_tbl[lnum]);
1053  err = MOVE_CANCEL_RACE;
1054  goto out_unlock_leb;
1055  }
1056 
1057  /*
1058  * OK, now the LEB is locked and we can safely start moving it. Since
1059  * this function utilizes the @ubi->peb_buf buffer which is shared
1060  * with some other functions - we lock the buffer by taking the
1061  * @ubi->buf_mutex.
1062  */
1063  mutex_lock(&ubi->buf_mutex);
1064  dbg_wl("read %d bytes of data", aldata_size);
1065  err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1066  if (err && err != UBI_IO_BITFLIPS) {
1067  ubi_warn("error %d while reading data from PEB %d",
1068  err, from);
1069  err = MOVE_SOURCE_RD_ERR;
1070  goto out_unlock_buf;
1071  }
1072 
1073  /*
1074  * Now we have got to calculate how much data we have to copy. In
1075  * case of a static volume it is fairly easy - the VID header contains
1076  * the data size. In case of a dynamic volume it is more difficult - we
1077  * have to read the contents, cut 0xFF bytes from the end and copy only
1078  * the first part. We must do this to avoid writing 0xFF bytes as it
1079  * may have some side-effects. And not only this. It is important not
1080  * to include those 0xFFs in the CRC because later they may be filled
1081  * by data.
1082  */
1083  if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1084  aldata_size = data_size =
1085  ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1086 
1087  cond_resched();
1088  crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1089  cond_resched();
1090 
1091  /*
1092  * It may turn out to be that the whole @from physical eraseblock
1093  * contains only 0xFF bytes. Then we have to only write the VID header
1094  * and do not write any data. This also means we should not set
1095  * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1096  */
1097  if (data_size > 0) {
1098  vid_hdr->copy_flag = 1;
1099  vid_hdr->data_size = cpu_to_be32(data_size);
1100  vid_hdr->data_crc = cpu_to_be32(crc);
1101  }
1102  vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1103 
1104  err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1105  if (err) {
1106  if (err == -EIO)
1107  err = MOVE_TARGET_WR_ERR;
1108  goto out_unlock_buf;
1109  }
1110 
1111  cond_resched();
1112 
1113  /* Read the VID header back and check if it was written correctly */
1114  err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1115  if (err) {
1116  if (err != UBI_IO_BITFLIPS) {
1117  ubi_warn("error %d while reading VID header back from PEB %d",
1118  err, to);
1119  if (is_error_sane(err))
1120  err = MOVE_TARGET_RD_ERR;
1121  } else
1122  err = MOVE_TARGET_BITFLIPS;
1123  goto out_unlock_buf;
1124  }
1125 
1126  if (data_size > 0) {
1127  err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1128  if (err) {
1129  if (err == -EIO)
1130  err = MOVE_TARGET_WR_ERR;
1131  goto out_unlock_buf;
1132  }
1133 
1134  cond_resched();
1135 
1136  /*
1137  * We've written the data and are going to read it back to make
1138  * sure it was written correctly.
1139  */
1140  memset(ubi->peb_buf, 0xFF, aldata_size);
1141  err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1142  if (err) {
1143  if (err != UBI_IO_BITFLIPS) {
1144  ubi_warn("error %d while reading data back from PEB %d",
1145  err, to);
1146  if (is_error_sane(err))
1147  err = MOVE_TARGET_RD_ERR;
1148  } else
1149  err = MOVE_TARGET_BITFLIPS;
1150  goto out_unlock_buf;
1151  }
1152 
1153  cond_resched();
1154 
1155  if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
1156  ubi_warn("read data back from PEB %d and it is different",
1157  to);
1158  err = -EINVAL;
1159  goto out_unlock_buf;
1160  }
1161  }
1162 
1163  ubi_assert(vol->eba_tbl[lnum] == from);
1164  down_read(&ubi->fm_sem);
1165  vol->eba_tbl[lnum] = to;
1166  up_read(&ubi->fm_sem);
1167 
1168 out_unlock_buf:
1169  mutex_unlock(&ubi->buf_mutex);
1170 out_unlock_leb:
1171  leb_write_unlock(ubi, vol_id, lnum);
1172  return err;
1173 }
1174 
1193 static void print_rsvd_warning(struct ubi_device *ubi,
1194  struct ubi_attach_info *ai)
1195 {
1196  /*
1197  * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
1198  * large number to distinguish between newly flashed and used images.
1199  */
1200  if (ai->max_sqnum > (1 << 18)) {
1201  int min = ubi->beb_rsvd_level / 10;
1202 
1203  if (!min)
1204  min = 1;
1205  if (ubi->beb_rsvd_pebs > min)
1206  return;
1207  }
1208 
1209  ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1210  ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1211  if (ubi->corr_peb_count)
1212  ubi_warn("%d PEBs are corrupted and not used",
1213  ubi->corr_peb_count);
1214 }
1215 
1226 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1227  struct ubi_attach_info *ai_scan)
1228 {
1229  int i, j, num_volumes, ret = 0;
1230  int **scan_eba, **fm_eba;
1231  struct ubi_ainf_volume *av;
1232  struct ubi_volume *vol;
1233  struct ubi_ainf_peb *aeb;
1234  struct rb_node *rb;
1235 
1236  num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1237 
1238  scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
1239  if (!scan_eba)
1240  return -ENOMEM;
1241 
1242  fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
1243  if (!fm_eba) {
1244  kfree(scan_eba);
1245  return -ENOMEM;
1246  }
1247 
1248  for (i = 0; i < num_volumes; i++) {
1249  vol = ubi->volumes[i];
1250  if (!vol)
1251  continue;
1252 
1253  scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
1254  GFP_KERNEL);
1255  if (!scan_eba[i]) {
1256  ret = -ENOMEM;
1257  goto out_free;
1258  }
1259 
1260  fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
1261  GFP_KERNEL);
1262  if (!fm_eba[i]) {
1263  ret = -ENOMEM;
1264  goto out_free;
1265  }
1266 
1267  for (j = 0; j < vol->reserved_pebs; j++)
1268  scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1269 
1270  av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1271  if (!av)
1272  continue;
1273 
1274  ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1275  scan_eba[i][aeb->lnum] = aeb->pnum;
1276 
1277  av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1278  if (!av)
1279  continue;
1280 
1281  ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1282  fm_eba[i][aeb->lnum] = aeb->pnum;
1283 
1284  for (j = 0; j < vol->reserved_pebs; j++) {
1285  if (scan_eba[i][j] != fm_eba[i][j]) {
1286  if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1287  fm_eba[i][j] == UBI_LEB_UNMAPPED)
1288  continue;
1289 
1290  ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
1291  vol->vol_id, i, fm_eba[i][j],
1292  scan_eba[i][j]);
1293  ubi_assert(0);
1294  }
1295  }
1296  }
1297 
1298 out_free:
1299  for (i = 0; i < num_volumes; i++) {
1300  if (!ubi->volumes[i])
1301  continue;
1302 
1303  kfree(scan_eba[i]);
1304  kfree(fm_eba[i]);
1305  }
1306 
1307  kfree(scan_eba);
1308  kfree(fm_eba);
1309  return ret;
1310 }
1311 
1320 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1321 {
1322  int i, j, err, num_volumes;
1323  struct ubi_ainf_volume *av;
1324  struct ubi_volume *vol;
1325  struct ubi_ainf_peb *aeb;
1326  struct rb_node *rb;
1327 
1328  dbg_eba("initialize EBA sub-system");
1329 
1330  spin_lock_init(&ubi->ltree_lock);
1331  mutex_init(&ubi->alc_mutex);
1332  ubi->ltree = RB_ROOT;
1333 
1334  ubi->global_sqnum = ai->max_sqnum + 1;
1335  num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1336 
1337  for (i = 0; i < num_volumes; i++) {
1338  vol = ubi->volumes[i];
1339  if (!vol)
1340  continue;
1341 
1342  cond_resched();
1343 
1344  vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
1345  GFP_KERNEL);
1346  if (!vol->eba_tbl) {
1347  err = -ENOMEM;
1348  goto out_free;
1349  }
1350 
1351  for (j = 0; j < vol->reserved_pebs; j++)
1352  vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1353 
1354  av = ubi_find_av(ai, idx2vol_id(ubi, i));
1355  if (!av)
1356  continue;
1357 
1358  ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1359  if (aeb->lnum >= vol->reserved_pebs)
1360  /*
1361  * This may happen in case of an unclean reboot
1362  * during re-size.
1363  */
1364  ubi_move_aeb_to_list(av, aeb, &ai->erase);
1365  vol->eba_tbl[aeb->lnum] = aeb->pnum;
1366  }
1367  }
1368 
1369  if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1370  ubi_err("no enough physical eraseblocks (%d, need %d)",
1371  ubi->avail_pebs, EBA_RESERVED_PEBS);
1372  if (ubi->corr_peb_count)
1373  ubi_err("%d PEBs are corrupted and not used",
1374  ubi->corr_peb_count);
1375  err = -ENOSPC;
1376  goto out_free;
1377  }
1378  ubi->avail_pebs -= EBA_RESERVED_PEBS;
1379  ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1380 
1381  if (ubi->bad_allowed) {
1382  ubi_calculate_reserved(ubi);
1383 
1384  if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1385  /* Not enough free physical eraseblocks */
1386  ubi->beb_rsvd_pebs = ubi->avail_pebs;
1387  print_rsvd_warning(ubi, ai);
1388  } else
1389  ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1390 
1391  ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1392  ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1393  }
1394 
1395  dbg_eba("EBA sub-system is initialized");
1396  return 0;
1397 
1398 out_free:
1399  for (i = 0; i < num_volumes; i++) {
1400  if (!ubi->volumes[i])
1401  continue;
1402  kfree(ubi->volumes[i]->eba_tbl);
1403  ubi->volumes[i]->eba_tbl = NULL;
1404  }
1405  return err;
1406 }