Linux Kernel 3.7.1
fastmap.c
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Author: Richard Weinberger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"
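/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */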
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

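/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */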
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

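/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */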
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

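/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */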
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->used_ebs = used_ebs;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

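/**
 * assign_aeb_to_av - assigns an aeb to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned aeb
 * @av: target scan volume
 */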
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

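/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */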
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

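/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */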
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err("orphaned volume in fastmap pool!");
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

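/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: the PEB to be unmapped
 */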
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

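/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @eba_orphans: list of PEBs which need to be scanned
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */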
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *eba_orphans, struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
	int i, pnum, err, found_orphan, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err("unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
			ubi_err("bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			found_orphan = 0;
			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
				if (tmp_aeb->pnum == pnum) {
					found_orphan = 1;
					break;
				}
			}
			if (found_orphan) {
				list_del(&tmp_aeb->u.list);
				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
			}

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err("fastmap pool PEBs contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}

	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

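/**
 * count_fastmap_pebs - count the PEBs found by fastmap.
 * @ai: the UBI attach info object
 */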
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

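/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */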
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, eba_orphans, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_ec_hdr *ech;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	INIT_LIST_HEAD(&eba_orphans);
	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		ret = -ENOMEM;
		goto fail;
	}

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, "
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, "
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum)
					aeb = tmp_aeb;
			}

			/* This can happen if a PEB is already in an EBA known
			 * by this fastmap but the PEB itself is not in the used
			 * list.
			 * In this case the PEB can be within the fastmap pool
			 * or while writing the fastmap it was in the protection
			 * queue.
			 */
			if (!aeb) {
				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						       GFP_KERNEL);
				if (!aeb) {
					ret = -ENOMEM;

					goto fail;
				}

				aeb->lnum = j;
				aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
				aeb->ec = -1;
				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
				list_add_tail(&aeb->u.list, &eba_orphans);
				continue;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}

		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
		if (!ech) {
			ret = -ENOMEM;
			goto fail;
		}

		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
					 u.list) {
			int err;

			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
				ubi_err("bad PEB in fastmap EBA orphan list");
				ret = UBI_BAD_FASTMAP;
				kfree(ech);
				goto fail;
			}

			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err("unable to read EC header! PEB:%i "
					"err:%i", tmp_aeb->pnum, err);
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				kfree(ech);

				goto fail;
			} else if (err == UBI_IO_BITFLIPS)
				tmp_aeb->scrub = 1;

			tmp_aeb->ec = be64_to_cpu(ech->ec);
			assign_aeb_to_av(ai, tmp_aeb, av);
		}

		kfree(ech);
	}

803 
804  ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
805  &eba_orphans, &free);
806  if (ret)
807  goto fail;
808 
809  ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
810  &eba_orphans, &free);
811  if (ret)
812  goto fail;
813 
814  if (max_sqnum > ai->max_sqnum)
815  ai->max_sqnum = max_sqnum;
816 
817  list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
818  list_del(&tmp_aeb->u.list);
819  list_add_tail(&tmp_aeb->u.list, &ai->free);
820  }
821 
822  /*
823  * If fastmap is leaking PEBs (must not happen), raise a
824  * fat warning and fall back to scanning mode.
825  * We do this here because in ubi_wl_init() it's too late
826  * and we cannot fall back to scanning.
827  */
828  if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
829  ai->bad_peb_count - fm->used_blocks))
830  goto fail_bad;
831 
832  return 0;
833 
834 fail_bad:
835  ret = UBI_BAD_FASTMAP;
836 fail:
837  return ret;
838 }
839 
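/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: the fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */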
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	mutex_lock(&ubi->fm_mutex);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		if (!ubi->image_seq)
			ubi->image_seq = be32_to_cpu(ech->image_seq);

		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x,"
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x,"
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, "
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap");
	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	mutex_unlock(&ubi->fm_mutex);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

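/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */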
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++)
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++)
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err("unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err("unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err("unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
out:
	return ret;
}

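/**
 * erase_block - manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */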
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

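/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */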
static int invalidate_fastmap(struct ubi_device *ubi,
			      struct ubi_fastmap_layout *fm)
{
	int ret, i;
	struct ubi_vid_hdr *vh;

	ret = erase_block(ubi, fm->e[0]->pnum);
	if (ret < 0)
		return ret;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		return -ENOMEM;

	/* deleting the current fastmap SB is not enough, an old SB may exist,
	 * so create a (corrupted) SB such that fastmap will find it and fall
	 * back to scanning mode in any case */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);

	for (i = 0; i < fm->used_blocks; i++)
		ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);

	return ret;
}

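/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */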
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	mutex_lock(&ubi->fm_mutex);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		mutex_unlock(&ubi->fm_mutex);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		mutex_unlock(&ubi->fm_mutex);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		mutex_unlock(&ubi->fm_mutex);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

	for (i = 0; i < new_fm->used_blocks; i++) {
		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!new_fm->e[i]) {
			while (i--)
				kfree(new_fm->e[i]);

			kfree(new_fm);
			mutex_unlock(&ubi->fm_mutex);
			return -ENOMEM;
		}
	}

	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err("fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e && !old_fm) {
			int j;
			ubi_err("could not get any free erase block");

			for (j = 1; j < i; j++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

			ret = -ENOSPC;
			goto err;
		} else if (!tmp_e && old_fm) {
			ret = erase_block(ubi, old_fm->e[i]->pnum);
			if (ret < 0) {
				int j;

				for (j = 1; j < i; j++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
							  j, 0);

				ubi_err("could not erase old fastmap PEB");
				goto err;
			}

			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
			new_fm->e[i]->ec = old_fm->e[i]->ec;
		} else {
			new_fm->e[i]->pnum = tmp_e->pnum;
			new_fm->e[i]->ec = tmp_e->ec;

			if (old_fm)
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				int i;
				ubi_err("could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
				goto err;
			}

			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
			new_fm->e[0]->ec = ret;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);

			new_fm->e[0]->pnum = tmp_e->pnum;
			new_fm->e[0]->ec = tmp_e->ec;
		}
	} else {
		if (!tmp_e) {
			int i;
			ubi_err("could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

			ret = -ENOSPC;
			goto err;
		}

		new_fm->e[0]->pnum = tmp_e->pnum;
		new_fm->e[0]->ec = tmp_e->ec;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	mutex_unlock(&ubi->fm_mutex);
	kfree(old_fm);
	return ret;

err:
	kfree(new_fm);

	ubi_warn("Unable to write new fastmap, err=%i", ret);

	ret = 0;
	if (old_fm) {
		ret = invalidate_fastmap(ubi, old_fm);
		if (ret < 0)
			ubi_err("Unable to invalidate current fastmap!");
		else if (ret)
			ret = 0;
	}
	goto out_unlock;
}