Linux Kernel  3.7.1
io.c
1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  * Copyright (c) Nokia Corporation, 2006, 2007
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13  * the GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18  *
19  * Author: Artem Bityutskiy (Битюцкий Артём)
20  */
21 
22 /*
23  * UBI input/output sub-system.
24  *
25  * This sub-system provides a uniform way to work with all kinds of
26  * underlying MTD devices. It also implements handy functions for reading and
27  * writing UBI headers.
28  *
29  * We try to have a paranoid mindset and not to trust what we read from
30  * the flash media, in order to be more secure and robust. So this
31  * sub-system validates every single header it reads from the flash media.
32  *
33  * Some words about how the eraseblock headers are stored.
34  *
35  * The erase counter header is always stored at offset zero. By default, the
36  * VID header is stored after the EC header at the closest aligned offset
37  * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
38  * header at the closest aligned offset. But this default layout may be
39  * changed. For example, for various reasons (e.g., optimization) UBI may be
40  * asked to put the VID header at a further offset, even an unaligned one.
41  * Of course, if the offset of the VID header is unaligned, UBI adds
42  * proper padding in front of it. Data offset may also be changed but it has to
43  * be aligned.
44  *
45  * About minimal I/O units. In general, UBI assumes a flash device model where
46  * there is only one minimal I/O unit size. E.g., in the case of NOR flash it
47  * is 1 byte, in the case of NAND flash it is a NAND page, etc. This is reported
48  * by MTD in the @ubi->mtd->writesize field. But as an exception, UBI allows
49  * using another (smaller) minimal I/O unit size for the EC and VID headers,
50  * which makes various optimizations possible.
51  *
52  * This is extremely useful for NAND flashes which allow several write
53  * operations to one NAND page. In this case UBI can fit the EC and VID
54  * headers into one NAND page. Thus, UBI may use the "sub-page" size as the
55  * minimal I/O unit for the headers (the @ubi->hdrs_min_io_size field), but it
56  * still reports the NAND page size (@ubi->min_io_size) as the minimal I/O
57  * unit to UBI users.
58  *
59  * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
60  * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
61  * headers.
62  *
63  * Q: why not just treat the sub-page as the minimal I/O unit of such a flash
64  * device, e.g., make @ubi->min_io_size = 512 in the example above?
65  *
66  * A: because when writing a sub-page, MTD still writes a full 2K page but the
67  * bytes which are not relevant to the sub-page are 0xFF. So, basically,
68  * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
69  * Thus, we prefer to use sub-pages only for EC and VID headers.
70  *
71  * As noted above, the VID header may start at a non-aligned offset. For
72  * example, in the case of a 2KiB-page NAND flash with a 512-byte sub-page,
73  * the VID header may reside at offset 1984 which is the last 64 bytes of the
74  * last sub-page (EC header is always at offset zero). This causes some
75  * difficulties when reading and writing VID headers.
76  *
77  * Suppose we have a 64-byte buffer and we read a VID header into it. We change
78  * the data and want to write this VID header out. As we can only write in
79  * 512-byte chunks, we have to allocate one more buffer and copy our VID header
80  * to offset 448 of this buffer.
81  *
82  * The I/O sub-system does the following trick in order to avoid this extra
83  * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
84  * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
85  * When the VID header is being written out, it shifts the VID header pointer
86  * back and writes the whole sub-page.
87  */
88 
89 #include <linux/crc32.h>
90 #include <linux/err.h>
91 #include <linux/slab.h>
92 #include "ubi.h"
93 
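/*
 * Illustrative sketch (not part of the original file): the buffer trick
 * described in the file header boils down to handing out a pointer that is
 * @ubi->vid_hdr_shift bytes into a @ubi->vid_hdr_alsize buffer, so the VID
 * header sits exactly where it will live inside the sub-page. This is roughly
 * what the ubi_zalloc_vid_hdr() helper used later in this file is expected to
 * do; treat it as an example, not as the real allocator.
 */
static inline struct ubi_vid_hdr *
example_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
{
	void *buf;

	/* Allocate the whole region the header is read and written with */
	buf = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
	if (!buf)
		return NULL;

	/* Hand out a pointer to where the VID header starts in that region */
	return buf + ubi->vid_hdr_shift;
}
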
94 static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
95 static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
96 static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
97  const struct ubi_ec_hdr *ec_hdr);
98 static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
99 static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
100  const struct ubi_vid_hdr *vid_hdr);
101 static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
102  int offset, int len);
103 
126 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
127  int len)
128 {
129  int err, retries = 0;
130  size_t read;
131  loff_t addr;
132 
133  dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
134 
135  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
136  ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
137  ubi_assert(len > 0);
138 
139  err = self_check_not_bad(ubi, pnum);
140  if (err)
141  return err;
142 
143  /*
144  * Deliberately corrupt the buffer to improve robustness. Indeed, if we
145  * do not do this, the following may happen:
146  * 1. The buffer contains data from a previous operation, e.g., data read
147  * from another PEB earlier. The data looks valid, so if we do not
148  * actually read anything and just return, the caller would not notice.
149  * E.g., if we are reading a VID header, the buffer may contain a
150  * valid VID header from another PEB.
151  * 2. The driver is buggy and returns success, -EBADMSG, or -EUCLEAN,
152  * but does not actually put any data into the buffer.
153  *
154  * This may confuse UBI or upper layers - they may think the buffer
155  * contains valid data while in fact it is just old data. This is
156  * especially likely because UBI (and UBIFS) relies on CRCs, and
157  * treats data as correct even in the case of ECC errors if the CRC is
158  * correct.
159  *
160  * Try to prevent this situation by changing the first byte of the
161  * buffer.
162  */
163  *((uint8_t *)buf) ^= 0xFF;
164 
165  addr = (loff_t)pnum * ubi->peb_size + offset;
166 retry:
167  err = mtd_read(ubi->mtd, addr, len, &read, buf);
168  if (err) {
169  const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
170 
171  if (mtd_is_bitflip(err)) {
172  /*
173  * -EUCLEAN is reported if there was a bit-flip which
174  * was corrected, so this is harmless.
175  *
176  * We do not report it here unless debugging is
177  * enabled. A corresponding message will be printed
178  * later, when the PEB has been scrubbed.
179  */
180  ubi_msg("fixable bit-flip detected at PEB %d", pnum);
181  ubi_assert(len == read);
182  return UBI_IO_BITFLIPS;
183  }
184 
185  if (retries++ < UBI_IO_RETRIES) {
186  ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
187  err, errstr, len, pnum, offset, read);
188  yield();
189  goto retry;
190  }
191 
192  ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
193  err, errstr, len, pnum, offset, read);
194  dump_stack();
195 
196  /*
197  * The driver should never return -EBADMSG if it failed to read
198  * all the requested data. But some buggy drivers might do
199  * this, so we change it to -EIO.
200  */
201  if (read != len && mtd_is_eccerr(err)) {
202  ubi_assert(0);
203  err = -EIO;
204  }
205  } else {
206  ubi_assert(len == read);
207 
208  if (ubi_dbg_is_bitflip(ubi)) {
209  dbg_gen("bit-flip (emulated)");
210  err = UBI_IO_BITFLIPS;
211  }
212  }
213 
214  return err;
215 }
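
/*
 * Usage sketch (illustrative, not part of the original file): how a caller
 * might interpret the ubi_io_read() return codes above. The policy shown here
 * is an assumption made for the example; the real decisions are taken by the
 * higher-level EBA and wear-levelling code.
 */
static int example_read_data(const struct ubi_device *ubi, void *buf, int pnum)
{
	int err;

	err = ubi_io_read(ubi, buf, pnum, ubi->leb_start, ubi->leb_size);
	if (err == UBI_IO_BITFLIPS)
		/* Data is valid, but the PEB should be scrubbed eventually */
		return 0;
	if (mtd_is_eccerr(err))
		/* Uncorrectable ECC error - upper layers fall back to CRCs */
		return err;

	return err; /* 0 on success, other negative error codes on failure */
}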
216 
234 int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
235  int len)
236 {
237  int err;
238  size_t written;
239  loff_t addr;
240 
241  dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
242 
243  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
244  ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
245  ubi_assert(offset % ubi->hdrs_min_io_size == 0);
246  ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
247 
248  if (ubi->ro_mode) {
249  ubi_err("read-only mode");
250  return -EROFS;
251  }
252 
253  err = self_check_not_bad(ubi, pnum);
254  if (err)
255  return err;
256 
257  /* The area we are writing to has to contain all 0xFF bytes */
258  err = ubi_self_check_all_ff(ubi, pnum, offset, len);
259  if (err)
260  return err;
261 
262  if (offset >= ubi->leb_start) {
263  /*
264  * We write to the data area of the physical eraseblock. Make
265  * sure it has valid EC and VID headers.
266  */
267  err = self_check_peb_ec_hdr(ubi, pnum);
268  if (err)
269  return err;
270  err = self_check_peb_vid_hdr(ubi, pnum);
271  if (err)
272  return err;
273  }
274 
275  if (ubi_dbg_is_write_failure(ubi)) {
276  ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
277  len, pnum, offset);
278  dump_stack();
279  return -EIO;
280  }
281 
282  addr = (loff_t)pnum * ubi->peb_size + offset;
283  err = mtd_write(ubi->mtd, addr, len, &written, buf);
284  if (err) {
285  ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
286  err, len, pnum, offset, written);
287  dump_stack();
288  ubi_dump_flash(ubi, pnum, offset, len);
289  } else
290  ubi_assert(written == len);
291 
292  if (!err) {
293  err = self_check_write(ubi, buf, pnum, offset, len);
294  if (err)
295  return err;
296 
297  /*
298  * Since we always write sequentially, the rest of the PEB has
299  * to contain only 0xFF bytes.
300  */
301  offset += len;
302  len = ubi->peb_size - offset;
303  if (len)
304  err = ubi_self_check_all_ff(ubi, pnum, offset, len);
305  }
306 
307  return err;
308 }
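
/*
 * Worked example (assumed geometry, illustration only): with 128 KiB physical
 * eraseblocks, a write to PEB 3 at offset 2048 targets flash address
 *
 *	addr = 3 * 131072 + 2048 = 395264 (0x60800)
 *
 * The (loff_t) cast in the address calculation above matters: "pnum *
 * ubi->peb_size" is an int multiplication and could overflow on large
 * devices, so it has to be promoted to 64 bits before multiplying.
 */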
309 
317 static void erase_callback(struct erase_info *ei)
318 {
319  wake_up_interruptible((wait_queue_head_t *)ei->priv);
320 }
321 
331 static int do_sync_erase(struct ubi_device *ubi, int pnum)
332 {
333  int err, retries = 0;
334  struct erase_info ei;
335  wait_queue_head_t wq;
336 
337  dbg_io("erase PEB %d", pnum);
338  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
339 
340  if (ubi->ro_mode) {
341  ubi_err("read-only mode");
342  return -EROFS;
343  }
344 
345 retry:
346  init_waitqueue_head(&wq);
347  memset(&ei, 0, sizeof(struct erase_info));
348 
349  ei.mtd = ubi->mtd;
350  ei.addr = (loff_t)pnum * ubi->peb_size;
351  ei.len = ubi->peb_size;
352  ei.callback = erase_callback;
353  ei.priv = (unsigned long)&wq;
354 
355  err = mtd_erase(ubi->mtd, &ei);
356  if (err) {
357  if (retries++ < UBI_IO_RETRIES) {
358  ubi_warn("error %d while erasing PEB %d, retry",
359  err, pnum);
360  yield();
361  goto retry;
362  }
363  ubi_err("cannot erase PEB %d, error %d", pnum, err);
364  dump_stack();
365  return err;
366  }
367 
368  err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
369  ei.state == MTD_ERASE_FAILED);
370  if (err) {
371  ubi_err("interrupted PEB %d erasure", pnum);
372  return -EINTR;
373  }
374 
375  if (ei.state == MTD_ERASE_FAILED) {
376  if (retries++ < UBI_IO_RETRIES) {
377  ubi_warn("error while erasing PEB %d, retry", pnum);
378  yield();
379  goto retry;
380  }
381  ubi_err("cannot erase PEB %d", pnum);
382  dump_stack();
383  return -EIO;
384  }
385 
386  err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
387  if (err)
388  return err;
389 
390  if (ubi_dbg_is_erase_failure(ubi)) {
391  ubi_err("cannot erase PEB %d (emulated)", pnum);
392  return -EIO;
393  }
394 
395  return 0;
396 }
397 
398 /* Patterns to write to a physical eraseblock when torturing it */
399 static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
400 
410 static int torture_peb(struct ubi_device *ubi, int pnum)
411 {
412  int err, i, patt_count;
413 
414  ubi_msg("run torture test for PEB %d", pnum);
415  patt_count = ARRAY_SIZE(patterns);
416  ubi_assert(patt_count > 0);
417 
418  mutex_lock(&ubi->buf_mutex);
419  for (i = 0; i < patt_count; i++) {
420  err = do_sync_erase(ubi, pnum);
421  if (err)
422  goto out;
423 
424  /* Make sure the PEB contains only 0xFF bytes */
425  err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
426  if (err)
427  goto out;
428 
429  err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
430  if (err == 0) {
431  ubi_err("erased PEB %d, but a non-0xFF byte found",
432  pnum);
433  err = -EIO;
434  goto out;
435  }
436 
437  /* Write a pattern and check it */
438  memset(ubi->peb_buf, patterns[i], ubi->peb_size);
439  err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
440  if (err)
441  goto out;
442 
443  memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
444  err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
445  if (err)
446  goto out;
447 
448  err = ubi_check_pattern(ubi->peb_buf, patterns[i],
449  ubi->peb_size);
450  if (err == 0) {
451  ubi_err("pattern %x checking failed for PEB %d",
452  patterns[i], pnum);
453  err = -EIO;
454  goto out;
455  }
456  }
457 
458  err = patt_count;
459  ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
460 
461 out:
462  mutex_unlock(&ubi->buf_mutex);
463  if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
464  /*
465  * If a bit-flip or data integrity error was detected, the test
466  * has not passed because it happened on a freshly erased
467  * physical eraseblock which means something is wrong with it.
468  */
469  ubi_err("read problems on freshly erased PEB %d, must be bad",
470  pnum);
471  err = -EIO;
472  }
473  return err;
474 }
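
/*
 * For reference (illustrative sketch): ubi_check_pattern(), used above and
 * declared in ubi.h, is expected to return 1 when every byte of the buffer
 * equals the pattern byte and 0 otherwise, roughly like this:
 */
static int example_check_pattern(const void *buf, uint8_t patt, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (((const uint8_t *)buf)[i] != patt)
			return 0; /* a byte differs from the pattern */

	return 1; /* the whole buffer matches @patt */
}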
475 
496 static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
497 {
498  int err, err1;
499  size_t written;
500  loff_t addr;
501  uint32_t data = 0;
502  /*
503  * Note, we cannot generally define VID header buffers on the stack,
504  * because of the way we deal with these buffers (see the header
505  * comment in this file). But we know this is a NOR-specific piece of
506  * code, so we can do it here. Still, this is error-prone and we
507  * should (pre-)allocate a VID header buffer instead.
508  */
509  struct ubi_vid_hdr vid_hdr;
510 
511  /*
512  * It is important to first invalidate the EC header, and then the VID
513  * header. Otherwise a power cut may lead to valid EC header and
514  * invalid VID header, in which case UBI will treat this PEB as
515  * corrupted and will try to preserve it, and print scary warnings.
516  */
517  addr = (loff_t)pnum * ubi->peb_size;
518  err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
519  if (!err) {
520  addr += ubi->vid_hdr_aloffset;
521  err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
522  if (!err)
523  return 0;
524  }
525 
526  /*
527  * We failed to write to the media. This was observed with Spansion
528  * S29GL512N NOR flash. Most probably the previous eraseblock erasure
529  * was interrupted at a very inappropriate moment, so the PEB became
530  * unwritable. In this case the PEB most probably contains garbage
531  * anyway.
532  */
533  err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
534  if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
535  err1 == UBI_IO_FF) {
536  struct ubi_ec_hdr ec_hdr;
537 
538  err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
539  if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
540  err1 == UBI_IO_FF)
541  /*
542  * Both VID and EC headers are corrupted, so we can
543  * safely erase this PEB without fear that it will be
544  * treated as a valid PEB after an unclean reboot.
545  */
546  return 0;
547  }
548 
549  /*
550  * The PEB contains a valid VID header, but we cannot invalidate it.
551  * Supposedly the flash media or the driver is screwed up, so return an
552  * error.
553  */
554  ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
555  pnum, err, err1);
556  ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
557  return -EIO;
558 }
559 
576 int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
577 {
578  int err, ret = 0;
579 
580  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
581 
582  err = self_check_not_bad(ubi, pnum);
583  if (err != 0)
584  return err;
585 
586  if (ubi->ro_mode) {
587  ubi_err("read-only mode");
588  return -EROFS;
589  }
590 
591  if (ubi->nor_flash) {
592  err = nor_erase_prepare(ubi, pnum);
593  if (err)
594  return err;
595  }
596 
597  if (torture) {
598  ret = torture_peb(ubi, pnum);
599  if (ret < 0)
600  return ret;
601  }
602 
603  err = do_sync_erase(ubi, pnum);
604  if (err)
605  return err;
606 
607  return ret + 1;
608 }
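
/*
 * Usage note (assumption: this mirrors how the wear-levelling code in wl.c
 * accounts for erasures): the positive return value of ubi_io_sync_erase()
 * is the number of erase operations actually performed - 1 for a plain
 * erase, more when @torture was set - so a caller can account for them in
 * the erase counter along the lines of:
 *
 *	err = ubi_io_sync_erase(ubi, pnum, torture);
 *	if (err < 0)
 *		return err;
 *	new_ec = old_ec + err;
 */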
609 
618 int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
619 {
620  struct mtd_info *mtd = ubi->mtd;
621 
622  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
623 
624  if (ubi->bad_allowed) {
625  int ret;
626 
627  ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
628  if (ret < 0)
629  ubi_err("error %d while checking if PEB %d is bad",
630  ret, pnum);
631  else if (ret)
632  dbg_io("PEB %d is bad", pnum);
633  return ret;
634  }
635 
636  return 0;
637 }
638 
647 int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
648 {
649  int err;
650  struct mtd_info *mtd = ubi->mtd;
651 
652  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
653 
654  if (ubi->ro_mode) {
655  ubi_err("read-only mode");
656  return -EROFS;
657  }
658 
659  if (!ubi->bad_allowed)
660  return 0;
661 
662  err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
663  if (err)
664  ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
665  return err;
666 }
667 
676 static int validate_ec_hdr(const struct ubi_device *ubi,
677  const struct ubi_ec_hdr *ec_hdr)
678 {
679  long long ec;
680  int vid_hdr_offset, leb_start;
681 
682  ec = be64_to_cpu(ec_hdr->ec);
683  vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
684  leb_start = be32_to_cpu(ec_hdr->data_offset);
685 
686  if (ec_hdr->version != UBI_VERSION) {
687  ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
688  UBI_VERSION, (int)ec_hdr->version);
689  goto bad;
690  }
691 
692  if (vid_hdr_offset != ubi->vid_hdr_offset) {
693  ubi_err("bad VID header offset %d, expected %d",
694  vid_hdr_offset, ubi->vid_hdr_offset);
695  goto bad;
696  }
697 
698  if (leb_start != ubi->leb_start) {
699  ubi_err("bad data offset %d, expected %d",
700  leb_start, ubi->leb_start);
701  goto bad;
702  }
703 
704  if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
705  ubi_err("bad erase counter %lld", ec);
706  goto bad;
707  }
708 
709  return 0;
710 
711 bad:
712  ubi_err("bad EC header");
713  ubi_dump_ec_hdr(ec_hdr);
714  dump_stack();
715  return 1;
716 }
717 
740 int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
741  struct ubi_ec_hdr *ec_hdr, int verbose)
742 {
743  int err, read_err;
744  uint32_t crc, magic, hdr_crc;
745 
746  dbg_io("read EC header from PEB %d", pnum);
747  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
748 
749  read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
750  if (read_err) {
751  if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
752  return read_err;
753 
754  /*
755  * We read all the data, but either a correctable bit-flip
756  * occurred, or MTD reported a data integrity error
757  * (uncorrectable ECC error in the case of NAND). The former is
758  * harmless, the latter may mean that the read data is
759  * corrupted. But we have a CRC checksum and we will detect
760  * this. If the EC header is still OK, we just report it as a
761  * bit-flip, to force scrubbing.
762  */
763  }
764 
765  magic = be32_to_cpu(ec_hdr->magic);
766  if (magic != UBI_EC_HDR_MAGIC) {
767  if (mtd_is_eccerr(read_err))
768  return UBI_IO_BAD_HDR_EBADMSG;
769 
770  /*
771  * The magic field is wrong. Let's check if we have read all
772  * 0xFF. If yes, this physical eraseblock is assumed to be
773  * empty.
774  */
775  if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
776  /* The physical eraseblock is supposedly empty */
777  if (verbose)
778  ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
779  pnum);
780  dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
781  pnum);
782  if (!read_err)
783  return UBI_IO_FF;
784  else
785  return UBI_IO_FF_BITFLIPS;
786  }
787 
788  /*
789  * This is not a valid erase counter header, and these are not
790  * 0xFF bytes. Report that the header is corrupted.
791  */
792  if (verbose) {
793  ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
794  pnum, magic, UBI_EC_HDR_MAGIC);
795  ubi_dump_ec_hdr(ec_hdr);
796  }
797  dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
798  pnum, magic, UBI_EC_HDR_MAGIC);
799  return UBI_IO_BAD_HDR;
800  }
801 
802  crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
803  hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
804 
805  if (hdr_crc != crc) {
806  if (verbose) {
807  ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
808  pnum, crc, hdr_crc);
809  ubi_dump_ec_hdr(ec_hdr);
810  }
811  dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
812  pnum, crc, hdr_crc);
813 
814  if (!read_err)
815  return UBI_IO_BAD_HDR;
816  else
817  return UBI_IO_BAD_HDR_EBADMSG;
818  }
819 
820  /* And of course validate what has just been read from the media */
821  err = validate_ec_hdr(ubi, ec_hdr);
822  if (err) {
823  ubi_err("validation failed for PEB %d", pnum);
824  return -EINVAL;
825  }
826 
827  /*
828  * If there was %-EBADMSG, but the header CRC is still OK, report a
829  * bit-flip to force scrubbing on this PEB.
830  */
831  return read_err ? UBI_IO_BITFLIPS : 0;
832 }
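
/*
 * Note (for illustration): UBI_EC_HDR_SIZE_CRC is the EC header size minus
 * the trailing 4-byte hdr_crc field, so the check above amounts to
 *
 *	crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC) ==
 *		be32_to_cpu(ec_hdr->hdr_crc)
 *
 * i.e. the CRC protects every header field except the CRC itself. The VID
 * header functions below follow the same scheme with UBI_VID_HDR_SIZE_CRC.
 */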
833 
849 int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
850  struct ubi_ec_hdr *ec_hdr)
851 {
852  int err;
853  uint32_t crc;
854 
855  dbg_io("write EC header to PEB %d", pnum);
856  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
857 
858  ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
859  ec_hdr->version = UBI_VERSION;
860  ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
861  ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
862  ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
863  crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
864  ec_hdr->hdr_crc = cpu_to_be32(crc);
865 
866  err = self_check_ec_hdr(ubi, pnum, ec_hdr);
867  if (err)
868  return err;
869 
870  err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
871  return err;
872 }
873 
882 static int validate_vid_hdr(const struct ubi_device *ubi,
883  const struct ubi_vid_hdr *vid_hdr)
884 {
885  int vol_type = vid_hdr->vol_type;
886  int copy_flag = vid_hdr->copy_flag;
887  int vol_id = be32_to_cpu(vid_hdr->vol_id);
888  int lnum = be32_to_cpu(vid_hdr->lnum);
889  int compat = vid_hdr->compat;
890  int data_size = be32_to_cpu(vid_hdr->data_size);
891  int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
892  int data_pad = be32_to_cpu(vid_hdr->data_pad);
893  int data_crc = be32_to_cpu(vid_hdr->data_crc);
894  int usable_leb_size = ubi->leb_size - data_pad;
895 
896  if (copy_flag != 0 && copy_flag != 1) {
897  ubi_err("bad copy_flag");
898  goto bad;
899  }
900 
901  if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
902  data_pad < 0) {
903  ubi_err("negative values");
904  goto bad;
905  }
906 
907  if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
908  ubi_err("bad vol_id");
909  goto bad;
910  }
911 
912  if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
913  ubi_err("bad compat");
914  goto bad;
915  }
916 
917  if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
918  compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
919  compat != UBI_COMPAT_REJECT) {
920  ubi_err("bad compat");
921  goto bad;
922  }
923 
924  if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
925  ubi_err("bad vol_type");
926  goto bad;
927  }
928 
929  if (data_pad >= ubi->leb_size / 2) {
930  ubi_err("bad data_pad");
931  goto bad;
932  }
933 
934  if (vol_type == UBI_VID_STATIC) {
935  /*
936  * Although from a high-level point of view static volumes may
937  * contain zero bytes of data, no VID header can contain
938  * zeros in these fields, because empty volumes do not have
939  * mapped logical eraseblocks.
940  */
941  if (used_ebs == 0) {
942  ubi_err("zero used_ebs");
943  goto bad;
944  }
945  if (data_size == 0) {
946  ubi_err("zero data_size");
947  goto bad;
948  }
949  if (lnum < used_ebs - 1) {
950  if (data_size != usable_leb_size) {
951  ubi_err("bad data_size");
952  goto bad;
953  }
954  } else if (lnum == used_ebs - 1) {
955  if (data_size == 0) {
956  ubi_err("bad data_size at last LEB");
957  goto bad;
958  }
959  } else {
960  ubi_err("too high lnum");
961  goto bad;
962  }
963  } else {
964  if (copy_flag == 0) {
965  if (data_crc != 0) {
966  ubi_err("non-zero data CRC");
967  goto bad;
968  }
969  if (data_size != 0) {
970  ubi_err("non-zero data_size");
971  goto bad;
972  }
973  } else {
974  if (data_size == 0) {
975  ubi_err("zero data_size of copy");
976  goto bad;
977  }
978  }
979  if (used_ebs != 0) {
980  ubi_err("bad used_ebs");
981  goto bad;
982  }
983  }
984 
985  return 0;
986 
987 bad:
988  ubi_err("bad VID header");
989  ubi_dump_vid_hdr(vid_hdr);
990  dump_stack();
991  return 1;
992 }
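
/*
 * Worked example (illustrative): for a static volume spanning used_ebs = 3
 * eraseblocks, the checks above require LEBs 0 and 1 to carry exactly
 * usable_leb_size bytes of data, while LEB 2 (lnum == used_ebs - 1) carries
 * the non-zero remainder; any lnum >= used_ebs is rejected as "too high".
 */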
993 
1010 int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1011  struct ubi_vid_hdr *vid_hdr, int verbose)
1012 {
1013  int err, read_err;
1014  uint32_t crc, magic, hdr_crc;
1015  void *p;
1016 
1017  dbg_io("read VID header from PEB %d", pnum);
1018  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
1019 
1020  p = (char *)vid_hdr - ubi->vid_hdr_shift;
1021  read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1022  ubi->vid_hdr_alsize);
1023  if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
1024  return read_err;
1025 
1026  magic = be32_to_cpu(vid_hdr->magic);
1027  if (magic != UBI_VID_HDR_MAGIC) {
1028  if (mtd_is_eccerr(read_err))
1029  return UBI_IO_BAD_HDR_EBADMSG;
1030 
1031  if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
1032  if (verbose)
1033  ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
1034  pnum);
1035  dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
1036  pnum);
1037  if (!read_err)
1038  return UBI_IO_FF;
1039  else
1040  return UBI_IO_FF_BITFLIPS;
1041  }
1042 
1043  if (verbose) {
1044  ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
1045  pnum, magic, UBI_VID_HDR_MAGIC);
1046  ubi_dump_vid_hdr(vid_hdr);
1047  }
1048  dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
1049  pnum, magic, UBI_VID_HDR_MAGIC);
1050  return UBI_IO_BAD_HDR;
1051  }
1052 
1053  crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1054  hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1055 
1056  if (hdr_crc != crc) {
1057  if (verbose) {
1058  ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
1059  pnum, crc, hdr_crc);
1060  ubi_dump_vid_hdr(vid_hdr);
1061  }
1062  dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
1063  pnum, crc, hdr_crc);
1064  if (!read_err)
1065  return UBI_IO_BAD_HDR;
1066  else
1067  return UBI_IO_BAD_HDR_EBADMSG;
1068  }
1069 
1070  err = validate_vid_hdr(ubi, vid_hdr);
1071  if (err) {
1072  ubi_err("validation failed for PEB %d", pnum);
1073  return -EINVAL;
1074  }
1075 
1076  return read_err ? UBI_IO_BITFLIPS : 0;
1077 }
1078 
1094 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1095  struct ubi_vid_hdr *vid_hdr)
1096 {
1097  int err;
1098  uint32_t crc;
1099  void *p;
1100 
1101  dbg_io("write VID header to PEB %d", pnum);
1102  ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
1103 
1104  err = self_check_peb_ec_hdr(ubi, pnum);
1105  if (err)
1106  return err;
1107 
1108  vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
1109  vid_hdr->version = UBI_VERSION;
1110  crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1111  vid_hdr->hdr_crc = cpu_to_be32(crc);
1112 
1113  err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1114  if (err)
1115  return err;
1116 
1117  p = (char *)vid_hdr - ubi->vid_hdr_shift;
1118  err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
1119  ubi->vid_hdr_alsize);
1120  return err;
1121 }
1122 
1131 static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
1132 {
1133  int err;
1134 
1135  if (!ubi->dbg->chk_io)
1136  return 0;
1137 
1138  err = ubi_io_is_bad(ubi, pnum);
1139  if (!err)
1140  return err;
1141 
1142  ubi_err("self-check failed for PEB %d", pnum);
1143  dump_stack();
1144  return err > 0 ? -EINVAL : err;
1145 }
1146 
1156 static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1157  const struct ubi_ec_hdr *ec_hdr)
1158 {
1159  int err;
1160  uint32_t magic;
1161 
1162  if (!ubi->dbg->chk_io)
1163  return 0;
1164 
1165  magic = be32_to_cpu(ec_hdr->magic);
1166  if (magic != UBI_EC_HDR_MAGIC) {
1167  ubi_err("bad magic %#08x, must be %#08x",
1168  magic, UBI_EC_HDR_MAGIC);
1169  goto fail;
1170  }
1171 
1172  err = validate_ec_hdr(ubi, ec_hdr);
1173  if (err) {
1174  ubi_err("self-check failed for PEB %d", pnum);
1175  goto fail;
1176  }
1177 
1178  return 0;
1179 
1180 fail:
1181  ubi_dump_ec_hdr(ec_hdr);
1182  dump_stack();
1183  return -EINVAL;
1184 }
1185 
1194 static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
1195 {
1196  int err;
1197  uint32_t crc, hdr_crc;
1198  struct ubi_ec_hdr *ec_hdr;
1199 
1200  if (!ubi->dbg->chk_io)
1201  return 0;
1202 
1203  ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1204  if (!ec_hdr)
1205  return -ENOMEM;
1206 
1207  err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
1208  if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1209  goto exit;
1210 
1211  crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
1212  hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
1213  if (hdr_crc != crc) {
1214  ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
1215  ubi_err("self-check failed for PEB %d", pnum);
1216  ubi_dump_ec_hdr(ec_hdr);
1217  dump_stack();
1218  err = -EINVAL;
1219  goto exit;
1220  }
1221 
1222  err = self_check_ec_hdr(ubi, pnum, ec_hdr);
1223 
1224 exit:
1225  kfree(ec_hdr);
1226  return err;
1227 }
1228 
1238 static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1239  const struct ubi_vid_hdr *vid_hdr)
1240 {
1241  int err;
1242  uint32_t magic;
1243 
1244  if (!ubi->dbg->chk_io)
1245  return 0;
1246 
1247  magic = be32_to_cpu(vid_hdr->magic);
1248  if (magic != UBI_VID_HDR_MAGIC) {
1249  ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
1250  magic, pnum, UBI_VID_HDR_MAGIC);
1251  goto fail;
1252  }
1253 
1254  err = validate_vid_hdr(ubi, vid_hdr);
1255  if (err) {
1256  ubi_err("self-check failed for PEB %d", pnum);
1257  goto fail;
1258  }
1259 
1260  return err;
1261 
1262 fail:
1263  ubi_err("self-check failed for PEB %d", pnum);
1264  ubi_dump_vid_hdr(vid_hdr);
1265  dump_stack();
1266  return -EINVAL;
1267 
1268 }
1269 
1278 static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1279 {
1280  int err;
1281  uint32_t crc, hdr_crc;
1282  struct ubi_vid_hdr *vid_hdr;
1283  void *p;
1284 
1285  if (!ubi->dbg->chk_io)
1286  return 0;
1287 
1288  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1289  if (!vid_hdr)
1290  return -ENOMEM;
1291 
1292  p = (char *)vid_hdr - ubi->vid_hdr_shift;
1293  err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1294  ubi->vid_hdr_alsize);
1295  if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1296  goto exit;
1297 
1298  crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1299  hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1300  if (hdr_crc != crc) {
1301  ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
1302  pnum, crc, hdr_crc);
1303  ubi_err("self-check failed for PEB %d", pnum);
1304  ubi_dump_vid_hdr(vid_hdr);
1305  dump_stack();
1306  err = -EINVAL;
1307  goto exit;
1308  }
1309 
1310  err = self_check_vid_hdr(ubi, pnum, vid_hdr);
1311 
1312 exit:
1313  ubi_free_vid_hdr(ubi, vid_hdr);
1314  return err;
1315 }
1316 
1329 static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1330  int offset, int len)
1331 {
1332  int err, i;
1333  size_t read;
1334  void *buf1;
1335  loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1336 
1337  if (!ubi->dbg->chk_io)
1338  return 0;
1339 
1340  buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1341  if (!buf1) {
1342  ubi_err("cannot allocate memory to check writes");
1343  return 0;
1344  }
1345 
1346  err = mtd_read(ubi->mtd, addr, len, &read, buf1);
1347  if (err && !mtd_is_bitflip(err))
1348  goto out_free;
1349 
1350  for (i = 0; i < len; i++) {
1351  uint8_t c = ((uint8_t *)buf)[i];
1352  uint8_t c1 = ((uint8_t *)buf1)[i];
1353  int dump_len;
1354 
1355  if (c == c1)
1356  continue;
1357 
1358  ubi_err("self-check failed for PEB %d:%d, len %d",
1359  pnum, offset, len);
1360  ubi_msg("data differ at position %d", i);
1361  dump_len = max_t(int, 128, len - i);
1362  ubi_msg("hex dump of the original buffer from %d to %d",
1363  i, i + dump_len);
1364  print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1365  buf + i, dump_len, 1);
1366  ubi_msg("hex dump of the read buffer from %d to %d",
1367  i, i + dump_len);
1368  print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1369  buf1 + i, dump_len, 1);
1370  dump_stack();
1371  err = -EINVAL;
1372  goto out_free;
1373  }
1374 
1375  vfree(buf1);
1376  return 0;
1377 
1378 out_free:
1379  vfree(buf1);
1380  return err;
1381 }
1382 
1394 int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1395 {
1396  size_t read;
1397  int err;
1398  void *buf;
1399  loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1400 
1401  if (!ubi->dbg->chk_io)
1402  return 0;
1403 
1404  buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1405  if (!buf) {
1406  ubi_err("cannot allocate memory to check for 0xFFs");
1407  return 0;
1408  }
1409 
1410  err = mtd_read(ubi->mtd, addr, len, &read, buf);
1411  if (err && !mtd_is_bitflip(err)) {
1412  ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
1413  err, len, pnum, offset, read);
1414  goto error;
1415  }
1416 
1417  err = ubi_check_pattern(buf, 0xFF, len);
1418  if (err == 0) {
1419  ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
1420  pnum, offset, len);
1421  goto fail;
1422  }
1423 
1424  vfree(buf);
1425  return 0;
1426 
1427 fail:
1428  ubi_err("self-check failed for PEB %d", pnum);
1429  ubi_msg("hex dump of the %d-%d region", offset, offset + len);
1430  print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
1431  err = -EINVAL;
1432 error:
1433  dump_stack();
1434  vfree(buf);
1435  return err;
1436 }