Linux Kernel  3.7.1
sm_ftl.c
1 /*
2  * Copyright © 2009 - Maxim Levitsky
3  * SmartMedia/xD translation layer
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/random.h>
13 #include <linux/hdreg.h>
14 #include <linux/kthread.h>
15 #include <linux/freezer.h>
16 #include <linux/sysfs.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include <linux/mtd/nand_ecc.h>
20 #include "nand/sm_common.h"
21 #include "sm_ftl.h"
22 
23 
24 
25 struct workqueue_struct *cache_flush_workqueue;
26 
27 static int cache_timeout = 1000;
28 module_param(cache_timeout, int, S_IRUGO);
29 MODULE_PARM_DESC(cache_timeout,
30  "Timeout (in ms) for cache flush (1000 ms default");
31 
32 static int debug;
33 module_param(debug, int, S_IRUGO | S_IWUSR);
34 MODULE_PARM_DESC(debug, "Debug level (0-2)");
35 
36 
37 /* ------------------- sysfs attributes ---------------------------------- */
38 struct sm_sysfs_attribute {
39  struct device_attribute dev_attr;
40  char *data;
41  int len;
42 };
43 
44 static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
45  char *buf)
46 {
47  struct sm_sysfs_attribute *sm_attr =
48  container_of(attr, struct sm_sysfs_attribute, dev_attr);
49 
50  strncpy(buf, sm_attr->data, sm_attr->len);
51  return sm_attr->len;
52 }
53 
54 
55 #define NUM_ATTRIBUTES 1
56 #define SM_CIS_VENDOR_OFFSET 0x59
57 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
58 {
59  struct attribute_group *attr_group;
60  struct attribute **attributes;
61  struct sm_sysfs_attribute *vendor_attribute;
62 
63  int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
64  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
65 
66  char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL); /* +1 for the NUL set below */
67  if (!vendor)
68  goto error1;
69  memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
70  vendor[vendor_len] = 0;
71 
72  /* Initialize sysfs attributes */
73  vendor_attribute =
74  kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
75  if (!vendor_attribute)
76  goto error2;
77 
78  sysfs_attr_init(&vendor_attribute->dev_attr.attr);
79 
80  vendor_attribute->data = vendor;
81  vendor_attribute->len = vendor_len;
82  vendor_attribute->dev_attr.attr.name = "vendor";
83  vendor_attribute->dev_attr.attr.mode = S_IRUGO;
84  vendor_attribute->dev_attr.show = sm_attr_show;
85 
86 
87  /* Create array of pointers to the attributes */
88  attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
89  GFP_KERNEL);
90  if (!attributes)
91  goto error3;
92  attributes[0] = &vendor_attribute->dev_attr.attr;
93 
94  /* Finally create the attribute group */
95  attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
96  if (!attr_group)
97  goto error4;
98  attr_group->attrs = attributes;
99  return attr_group;
100 error4:
101  kfree(attributes);
102 error3:
103  kfree(vendor_attribute);
104 error2:
105  kfree(vendor);
106 error1:
107  return NULL;
108 }
109 
110 static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
111 {
112  struct attribute **attributes = ftl->disk_attributes->attrs;
113  int i;
114 
115  for (i = 0; attributes[i] ; i++) {
116 
117  struct device_attribute *dev_attr = container_of(attributes[i],
118  struct device_attribute, attr);
119 
120  struct sm_sysfs_attribute *sm_attr =
121  container_of(dev_attr,
122  struct sm_sysfs_attribute, dev_attr);
123 
124  kfree(sm_attr->data);
125  kfree(sm_attr);
126  }
127 
128  kfree(ftl->disk_attributes->attrs);
129  kfree(ftl->disk_attributes);
130 }
131 
132 
133 /* ----------------------- oob helpers -------------------------------------- */
134 
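/*
 * The 10-bit logical block address is stored twice in the OOB area, in the
 * format decoded below: byte 0 holds the fixed pattern 00010 in its top five
 * bits plus the three high bits of the LBA, byte 1 holds the low seven bits
 * shifted left by one, and bit 0 of byte 1 is a parity bit keeping the whole
 * 16-bit word at even parity.
 */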
135 static int sm_get_lba(uint8_t *lba)
136 {
137  /* check fixed bits */
138  if ((lba[0] & 0xF8) != 0x10)
139  return -2;
140 
141  /* check parity - endianness doesn't matter */
142  if (hweight16(*(uint16_t *)lba) & 1)
143  return -2;
144 
145  return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
146 }
147 
148 
149 /*
150  * Read LBA associated with block
151  * returns -1, if block is erased
152  * returns -2 if error happens
153  */
154 static int sm_read_lba(struct sm_oob *oob)
155 {
156  static const uint32_t erased_pattern[4] = {
157  0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
158 
159  uint16_t lba_test;
160  int lba;
161 
162  /* First test for erased block */
163  if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
164  return -1;
165 
166  /* Now check if the two copies of the LBA differ by more than one bit */
167  lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
168  if (lba_test && !is_power_of_2(lba_test))
169  return -2;
170 
171  /* And read it */
172  lba = sm_get_lba(oob->lba_copy1);
173 
174  if (lba == -2)
175  lba = sm_get_lba(oob->lba_copy2);
176 
177  return lba;
178 }
179 
180 static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
181 {
182  uint8_t tmp[2];
183 
184  WARN_ON(lba >= 1000);
185 
186  tmp[0] = 0x10 | ((lba >> 7) & 0x07);
187  tmp[1] = (lba << 1) & 0xFF;
188 
189  if (hweight16(*(uint16_t *)tmp) & 0x01)
190  tmp[1] |= 1;
191 
192  oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
193  oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
194 }
195 
196 
197 /* Make offset from parts */
198 static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
199 {
200  WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
201  WARN_ON(zone < 0 || zone >= ftl->zone_count);
202  WARN_ON(block >= ftl->zone_size);
203  WARN_ON(boffset >= ftl->block_size);
204 
205  if (block == -1)
206  return -1;
207 
208  return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
209 }
210 
211 /* Breaks offset into parts */
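/*
 * Note the asymmetry with sm_mkoffset() above: sm_mkoffset() builds a physical
 * media offset with SM_MAX_ZONE_SIZE blocks per zone, while sm_break_offset()
 * splits a logical byte offset, where each zone spans only max_lba logical
 * blocks; a zone index beyond zone_count is reported as -1.
 */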
212 static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
213  int *zone, int *block, int *boffset)
214 {
215  *boffset = do_div(offset, ftl->block_size);
216  *block = do_div(offset, ftl->max_lba);
217  *zone = offset >= ftl->zone_count ? -1 : offset;
218 }
219 
220 /* ---------------------- low level IO ------------------------------------- */
221 
222 static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
223 {
224  uint8_t ecc[3];
225 
226  __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
227  if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
228  return -EIO;
229 
230  buffer += SM_SMALL_PAGE;
231 
232  __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
233  if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
234  return -EIO;
235  return 0;
236 }
237 
238 /* Reads a sector + oob*/
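/*
 * Read flow: unmapped (-1) blocks are returned as all 0xFF without touching
 * the media; failed reads are retried a limited number of times with a media
 * recheck in between, the OOB is sanity-checked to guard against garbage
 * returned after card removal, and on small-page NAND the ECC is additionally
 * verified in software by sm_correct_sector().
 */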
239 static int sm_read_sector(struct sm_ftl *ftl,
240  int zone, int block, int boffset,
241  uint8_t *buffer, struct sm_oob *oob)
242 {
243  struct mtd_info *mtd = ftl->trans->mtd;
244  struct mtd_oob_ops ops;
245  struct sm_oob tmp_oob;
246  int ret = -EIO;
247  int try = 0;
248 
249  /* The FTL table can contain -1 entries; these are unmapped and read back as all 0xFF */
250  if (block == -1) {
251  memset(buffer, 0xFF, SM_SECTOR_SIZE);
252  return 0;
253  }
254 
255  /* User might not need the oob, but we do for data verification */
256  if (!oob)
257  oob = &tmp_oob;
258 
259  ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
260  ops.ooboffs = 0;
261  ops.ooblen = SM_OOB_SIZE;
262  ops.oobbuf = (void *)oob;
263  ops.len = SM_SECTOR_SIZE;
264  ops.datbuf = buffer;
265 
266 again:
267  if (try++) {
268  /* Avoid infinite recursion on CIS reads, sm_recheck_media
269  won't help anyway */
270  if (zone == 0 && block == ftl->cis_block && boffset ==
271  ftl->cis_boffset)
272  return ret;
273 
274  /* Test if media is stable */
275  if (try == 3 || sm_recheck_media(ftl))
276  return ret;
277  }
278 
279  /* Unfortunately, oob read will _always_ succeed,
280  despite card removal..... */
281  ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
282 
283  /* Test for unknown errors */
284  if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
285  dbg("read of block %d at zone %d, failed due to error (%d)",
286  block, zone, ret);
287  goto again;
288  }
289 
290  /* Do a basic test on the oob, to guard against returned garbage */
291  if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
292  goto again;
293 
294  /* This should never happen, unless there is a bug in the mtd driver */
295  WARN_ON(ops.oobretlen != SM_OOB_SIZE);
296  WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
297 
298  if (!buffer)
299  return 0;
300 
301  /* Test if sector marked as bad */
302  if (!sm_sector_valid(oob)) {
303  dbg("read of block %d at zone %d, failed because it is marked"
304  " as bad" , block, zone);
305  goto again;
306  }
307 
308  /* Test ECC*/
309  if (mtd_is_eccerr(ret) ||
310  (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
311 
312  dbg("read of block %d at zone %d, failed due to ECC error",
313  block, zone);
314  goto again;
315  }
316 
317  return 0;
318 }
319 
320 /* Writes a sector to media */
321 static int sm_write_sector(struct sm_ftl *ftl,
322  int zone, int block, int boffset,
323  uint8_t *buffer, struct sm_oob *oob)
324 {
325  struct mtd_oob_ops ops;
326  struct mtd_info *mtd = ftl->trans->mtd;
327  int ret;
328 
329  BUG_ON(ftl->readonly);
330 
331  if (zone == 0 && (block == ftl->cis_block || block == 0)) {
332  dbg("attempted to write the CIS!");
333  return -EIO;
334  }
335 
336  if (ftl->unstable)
337  return -EIO;
338 
339  ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
340  ops.len = SM_SECTOR_SIZE;
341  ops.datbuf = buffer;
342  ops.ooboffs = 0;
343  ops.ooblen = SM_OOB_SIZE;
344  ops.oobbuf = (void *)oob;
345 
346  ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
347 
348  /* Now we assume that hardware will catch write bitflip errors */
349 
350  if (ret) {
351  dbg("write to block %d at zone %d, failed with error %d",
352  block, zone, ret);
353 
354  sm_recheck_media(ftl);
355  return ret;
356  }
357 
358  /* This should never happen, unless there is a bug in the driver */
359  WARN_ON(ops.oobretlen != SM_OOB_SIZE);
360  WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
361 
362  return 0;
363 }
364 
365 /* ------------------------ block IO ------------------------------------- */
366 
367 /* Write a block using data and lba, and invalid sector bitmap */
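/*
 * Bits set in invalid_bitmap mark sectors whose contents could not be read
 * back from the old block; those sectors are written with data_status = 0 so
 * they stay flagged invalid on the media. A failed sector write triggers one
 * erase-and-retry of the whole block; a second failure marks the block bad.
 */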
368 static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
369  int zone, int block, int lba,
370  unsigned long invalid_bitmap)
371 {
372  struct sm_oob oob;
373  int boffset;
374  int retry = 0;
375 
376  /* Initialize the oob with requested values */
377  memset(&oob, 0xFF, SM_OOB_SIZE);
378  sm_write_lba(&oob, lba);
379 restart:
380  if (ftl->unstable)
381  return -EIO;
382 
383  for (boffset = 0; boffset < ftl->block_size;
384  boffset += SM_SECTOR_SIZE) {
385 
386  oob.data_status = 0xFF;
387 
388  if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
389 
390  sm_printk("sector %d of block at LBA %d of zone %d"
391  " coudn't be read, marking it as invalid",
392  boffset / SM_SECTOR_SIZE, lba, zone);
393 
394  oob.data_status = 0;
395  }
396 
397  if (ftl->smallpagenand) {
398  __nand_calculate_ecc(buf + boffset,
399  SM_SMALL_PAGE, oob.ecc1);
400 
401  __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
402  SM_SMALL_PAGE, oob.ecc2);
403  }
404  if (!sm_write_sector(ftl, zone, block, boffset,
405  buf + boffset, &oob))
406  continue;
407 
408  if (!retry) {
409 
410  /* If the write fails, try to erase the block */
411  /* This is safe, because we never write to blocks
412  that contain valuable data.
413  This is intended to repair blocks that are marked
414  as erased but aren't fully erased */
415 
416  if (sm_erase_block(ftl, zone, block, 0))
417  return -EIO;
418 
419  retry = 1;
420  goto restart;
421  } else {
422  sm_mark_block_bad(ftl, zone, block);
423  return -EIO;
424  }
425  }
426  return 0;
427 }
428 
429 
430 /* Mark whole block at offset 'offs' as bad. */
431 static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
432 {
433  struct sm_oob oob;
434  int boffset;
435 
436  memset(&oob, 0xFF, SM_OOB_SIZE);
437  oob.block_status = 0xF0;
438 
439  if (ftl->unstable)
440  return;
441 
442  if (sm_recheck_media(ftl))
443  return;
444 
445  sm_printk("marking block %d of zone %d as bad", block, zone);
446 
447  /* We aren't checking the return value, because we don't care */
448  /* This also fails on fake xD cards, but I guess these won't expose
449  any bad blocks till they fail completely */
450  for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
451  sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
452 }
453 
454 /*
455  * Erase a block within a zone
456  * If erase succeeds, it updates free block fifo, otherwise marks block as bad
457  */
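/*
 * The erase is submitted asynchronously through mtd_erase(); if it is still
 * MTD_ERASE_PENDING we sleep on erase_completion, which is signalled by
 * sm_erase_callback(). With put_free set, a successfully erased block is
 * returned to the zone's free-block FIFO.
 */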
458 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
459  int put_free)
460 {
461  struct ftl_zone *zone = &ftl->zones[zone_num];
462  struct mtd_info *mtd = ftl->trans->mtd;
463  struct erase_info erase;
464 
465  erase.mtd = mtd;
466  erase.callback = sm_erase_callback;
467  erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
468  erase.len = ftl->block_size;
469  erase.priv = (u_long)ftl;
470 
471  if (ftl->unstable)
472  return -EIO;
473 
474  BUG_ON(ftl->readonly);
475 
476  if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
477  sm_printk("attempted to erase the CIS!");
478  return -EIO;
479  }
480 
481  if (mtd_erase(mtd, &erase)) {
482  sm_printk("erase of block %d in zone %d failed",
483  block, zone_num);
484  goto error;
485  }
486 
487  if (erase.state == MTD_ERASE_PENDING)
488  wait_for_completion(&ftl->erase_completion);
489 
490  if (erase.state != MTD_ERASE_DONE) {
491  sm_printk("erase of block %d in zone %d failed after wait",
492  block, zone_num);
493  goto error;
494  }
495 
496  if (put_free)
497  kfifo_in(&zone->free_sectors,
498  (const unsigned char *)&block, sizeof(block));
499 
500  return 0;
501 error:
502  sm_mark_block_bad(ftl, zone_num, block);
503  return -EIO;
504 }
505 
506 static void sm_erase_callback(struct erase_info *self)
507 {
508  struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
509  complete(&ftl->erase_completion);
510 }
511 
512 /* Thoroughly test that block is valid. */
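/*
 * The lbas[] array below collects the distinct LBA values seen across the
 * block's sectors (seeded with -3, a value sm_read_lba() can never return).
 * One distinct value means the block is consistent; two means it was sliced
 * by an interrupted operation and gets erased; three or more means the block
 * is fishy and -EIO is returned.
 */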
513 static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
514 {
515  int boffset;
516  struct sm_oob oob;
517  int lbas[] = { -3, 0, 0, 0 };
518  int i = 0;
519  int test_lba;
520 
521 
522  /* First just check that block doesn't look fishy */
523  /* Only blocks that are fully valid, or that are sliced into two parts, are
524  accepted */
525  for (boffset = 0; boffset < ftl->block_size;
526  boffset += SM_SECTOR_SIZE) {
527 
528  /* This shouldn't happen anyway */
529  if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
530  return -2;
531 
532  test_lba = sm_read_lba(&oob);
533 
534  if (lbas[i] != test_lba)
535  lbas[++i] = test_lba;
536 
537  /* If we found three different LBAs, something is fishy */
538  if (i == 3)
539  return -EIO;
540  }
541 
542  /* If the block is sliced (partially erased usually) erase it */
543  if (i == 2) {
544  sm_erase_block(ftl, zone, block, 1);
545  return 1;
546  }
547 
548  return 0;
549 }
550 
551 /* ----------------- media scanning --------------------------------- */
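/* Geometry table: { size in MiB, cylinders, heads, sectors }, matched
   against the device size in sm_get_media_info() below. */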
552 static const struct chs_entry chs_table[] = {
553  { 1, 125, 4, 4 },
554  { 2, 125, 4, 8 },
555  { 4, 250, 4, 8 },
556  { 8, 250, 4, 16 },
557  { 16, 500, 4, 16 },
558  { 32, 500, 8, 16 },
559  { 64, 500, 8, 32 },
560  { 128, 500, 16, 32 },
561  { 256, 1000, 16, 32 },
562  { 512, 1015, 32, 63 },
563  { 1024, 985, 33, 63 },
564  { 2048, 985, 33, 63 },
565  { 0 },
566 };
567 
568 
569 static const uint8_t cis_signature[] = {
570  0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
571 };
572 /* Find out media parameters.
573  * This ideally has to be based on nand id, but for now device size is enough */
574 int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
575 {
576  int i;
577  int size_in_megs = mtd->size / (1024 * 1024);
578 
579  ftl->readonly = mtd->type == MTD_ROM;
580 
581  /* Manual settings for very old devices */
582  ftl->zone_count = 1;
583  ftl->smallpagenand = 0;
584 
585  switch (size_in_megs) {
586  case 1:
587  /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
588  ftl->zone_size = 256;
589  ftl->max_lba = 250;
590  ftl->block_size = 8 * SM_SECTOR_SIZE;
591  ftl->smallpagenand = 1;
592 
593  break;
594  case 2:
595  /* 2 MiB flash SmartMedia (256 byte pages)*/
596  if (mtd->writesize == SM_SMALL_PAGE) {
597  ftl->zone_size = 512;
598  ftl->max_lba = 500;
599  ftl->block_size = 8 * SM_SECTOR_SIZE;
600  ftl->smallpagenand = 1;
601  /* 2 MiB rom SmartMedia */
602  } else {
603 
604  if (!ftl->readonly)
605  return -ENODEV;
606 
607  ftl->zone_size = 256;
608  ftl->max_lba = 250;
609  ftl->block_size = 16 * SM_SECTOR_SIZE;
610  }
611  break;
612  case 4:
613  /* 4 MiB flash/rom SmartMedia device */
614  ftl->zone_size = 512;
615  ftl->max_lba = 500;
616  ftl->block_size = 16 * SM_SECTOR_SIZE;
617  break;
618  case 8:
619  /* 8 MiB flash/rom SmartMedia device */
620  ftl->zone_size = 1024;
621  ftl->max_lba = 1000;
622  ftl->block_size = 16 * SM_SECTOR_SIZE;
623  }
624 
625  /* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
626  sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
627  if (size_in_megs >= 16) {
628  ftl->zone_count = size_in_megs / 16;
629  ftl->zone_size = 1024;
630  ftl->max_lba = 1000;
631  ftl->block_size = 32 * SM_SECTOR_SIZE;
632  }
633 
634  /* Test for proper write, erase and oob sizes */
635  if (mtd->erasesize > ftl->block_size)
636  return -ENODEV;
637 
638  if (mtd->writesize > SM_SECTOR_SIZE)
639  return -ENODEV;
640 
641  if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
642  return -ENODEV;
643 
644  if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
645  return -ENODEV;
646 
647  /* We use OOB */
648  if (!mtd_has_oob(mtd))
649  return -ENODEV;
650 
651  /* Find geometry information */
652  for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
653  if (chs_table[i].size == size_in_megs) {
654  ftl->cylinders = chs_table[i].cyl;
655  ftl->heads = chs_table[i].head;
656  ftl->sectors = chs_table[i].sec;
657  return 0;
658  }
659  }
660 
661  sm_printk("media has unknown size : %dMiB", size_in_megs);
662  ftl->cylinders = 985;
663  ftl->heads = 33;
664  ftl->sectors = 63;
665  return 0;
666 }
667 
668 /* Validate the CIS */
669 static int sm_read_cis(struct sm_ftl *ftl)
670 {
671  struct sm_oob oob;
672 
673  if (sm_read_sector(ftl,
674  0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
675  return -EIO;
676 
677  if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
678  return -EIO;
679 
680  if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
681  cis_signature, sizeof(cis_signature))) {
682  return 0;
683  }
684 
685  return -EIO;
686 }
687 
688 /* Scan the media for the CIS */
689 static int sm_find_cis(struct sm_ftl *ftl)
690 {
691  struct sm_oob oob;
692  int block, boffset;
693  int block_found = 0;
694  int cis_found = 0;
695 
696  /* Search for first valid block */
697  for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
698 
699  if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
700  continue;
701 
702  if (!sm_block_valid(&oob))
703  continue;
704  block_found = 1;
705  break;
706  }
707 
708  if (!block_found)
709  return -EIO;
710 
711  /* Search for first valid sector in this block */
712  for (boffset = 0 ; boffset < ftl->block_size;
713  boffset += SM_SECTOR_SIZE) {
714 
715  if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
716  continue;
717 
718  if (!sm_sector_valid(&oob))
719  continue;
720  break;
721  }
722 
723  if (boffset == ftl->block_size)
724  return -EIO;
725 
726  ftl->cis_block = block;
727  ftl->cis_boffset = boffset;
728  ftl->cis_page_offset = 0;
729 
730  cis_found = !sm_read_cis(ftl);
731 
732  if (!cis_found) {
733  ftl->cis_page_offset = SM_SMALL_PAGE;
734  cis_found = !sm_read_cis(ftl);
735  }
736 
737  if (cis_found) {
738  dbg("CIS block found at offset %x",
739  block * ftl->block_size +
740  boffset + ftl->cis_page_offset);
741  return 0;
742  }
743  return -EIO;
744 }
745 
746 /* Basic test to determine if the underlying mtd device is functional */
747 static int sm_recheck_media(struct sm_ftl *ftl)
748 {
749  if (sm_read_cis(ftl)) {
750 
751  if (!ftl->unstable) {
752  sm_printk("media unstable, not allowing writes");
753  ftl->unstable = 1;
754  }
755  return -EIO;
756  }
757  return 0;
758 }
759 
760 /* Initialize a FTL zone */
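/*
 * Zone scan: the first-sector OOB of every block is examined to build
 * lba_to_phys_table; erased blocks are queued in the free-block FIFO, bad or
 * damaged blocks are skipped, duplicate-LBA collisions are resolved with
 * sm_check_block(), and finally the FIFO is rotated by a random amount so
 * writes don't always start at the same physical block.
 */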
761 static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
762 {
763  struct ftl_zone *zone = &ftl->zones[zone_num];
764  struct sm_oob oob;
765  uint16_t block;
766  int lba;
767  int i = 0;
768  int len;
769 
770  dbg("initializing zone %d", zone_num);
771 
772  /* Allocate memory for FTL table */
773  zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
774 
775  if (!zone->lba_to_phys_table)
776  return -ENOMEM;
777  memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
778 
779 
780  /* Allocate memory for free sectors FIFO */
781  if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
782  kfree(zone->lba_to_phys_table);
783  return -ENOMEM;
784  }
785 
786  /* Now scan the zone */
787  for (block = 0 ; block < ftl->zone_size ; block++) {
788 
789  /* Skip blocks till the CIS (including) */
790  if (zone_num == 0 && block <= ftl->cis_block)
791  continue;
792 
793  /* Read the oob of first sector */
794  if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
795  return -EIO;
796 
797  /* Test to see if block is erased. It is enough to test
798  first sector, because erase happens in one shot */
799  if (sm_block_erased(&oob)) {
800  kfifo_in(&zone->free_sectors,
801  (unsigned char *)&block, 2);
802  continue;
803  }
804 
805  /* If block is marked as bad, skip it */
806  /* This assumes we can trust first sector*/
807  /* However the way the block valid status is defined, ensures
808  very low probability of failure here */
809  if (!sm_block_valid(&oob)) {
810  dbg("PH %04d <-> <marked bad>", block);
811  continue;
812  }
813 
814 
815  lba = sm_read_lba(&oob);
816 
817  /* Invalid LBA means that block is damaged. */
818  /* We can try to erase it, or mark it as bad, but
819  lets leave that to recovery application */
820  if (lba == -2 || lba >= ftl->max_lba) {
821  dbg("PH %04d <-> LBA %04d(bad)", block, lba);
822  continue;
823  }
824 
825 
826  /* If there is no collision,
827  just put the sector in the FTL table */
828  if (zone->lba_to_phys_table[lba] < 0) {
829  dbg_verbose("PH %04d <-> LBA %04d", block, lba);
830  zone->lba_to_phys_table[lba] = block;
831  continue;
832  }
833 
834  sm_printk("collision"
835  " of LBA %d between blocks %d and %d in zone %d",
836  lba, zone->lba_to_phys_table[lba], block, zone_num);
837 
838  /* Test that this block is valid*/
839  if (sm_check_block(ftl, zone_num, block))
840  continue;
841 
842  /* Test now the old block */
843  if (sm_check_block(ftl, zone_num,
844  zone->lba_to_phys_table[lba])) {
845  zone->lba_to_phys_table[lba] = block;
846  continue;
847  }
848 
849  /* If both blocks are valid and share the same LBA, it means that
850  they hold different versions of the same data. It is not
851  known which is more recent, thus just erase one of them
852  */
853  sm_printk("both blocks are valid, erasing the latter");
854  sm_erase_block(ftl, zone_num, block, 1);
855  }
856 
857  dbg("zone initialized");
858  zone->initialized = 1;
859 
860  /* No free sectors means that the zone is heavily damaged; writes won't
861  work, but it can still be (partially) read */
862  if (!kfifo_len(&zone->free_sectors)) {
863  sm_printk("no free blocks in zone %d", zone_num);
864  return 0;
865  }
866 
867  /* Randomize first block we write to */
868  get_random_bytes(&i, 2);
869  i %= (kfifo_len(&zone->free_sectors) / 2);
870 
871  while (i--) {
872  len = kfifo_out(&zone->free_sectors,
873  (unsigned char *)&block, 2);
874  WARN_ON(len != 2);
875  kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
876  }
877  return 0;
878 }
879 
880 /* Get and automatically initialize an FTL mapping for one zone */
881 struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
882 {
883  struct ftl_zone *zone;
884  int error;
885 
886  BUG_ON(zone_num >= ftl->zone_count);
887  zone = &ftl->zones[zone_num];
888 
889  if (!zone->initialized) {
890  error = sm_init_zone(ftl, zone_num);
891 
892  if (error)
893  return ERR_PTR(error);
894  }
895  return zone;
896 }
897 
898 
899 /* ----------------- cache handling ------------------------------------------*/
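/*
 * The FTL keeps a single-block write cache: cache_data holds one block worth
 * of sectors for (cache_zone, cache_block), and cache_data_invalid_bitmap
 * tracks which sectors have not been filled yet. A flush merges the missing
 * sectors from the old physical block and rewrites the whole block in one go.
 */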
900 
901 /* Initialize the one block cache */
902 void sm_cache_init(struct sm_ftl *ftl)
903 {
904  ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
905  ftl->cache_clean = 1;
906  ftl->cache_zone = -1;
907  ftl->cache_block = -1;
908  /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
909 }
910 
911 /* Put sector in one block cache */
912 void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
913 {
914  memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
915  clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
916  ftl->cache_clean = 0;
917 }
918 
919 /* Read a sector from the cache */
920 int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
921 {
922  if (test_bit(boffset / SM_SECTOR_SIZE,
923  &ftl->cache_data_invalid_bitmap))
924  return -1;
925 
926  memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
927  return 0;
928 }
929 
930 /* Write the cache to hardware */
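/*
 * Flush sequence: read back every still-invalid sector from the old physical
 * block, pop a fresh block from the zone's free FIFO, write the merged block
 * with sm_write_block() (retrying with another free block on failure), update
 * lba_to_phys_table, then erase the old block and return it to the FIFO.
 */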
931 int sm_cache_flush(struct sm_ftl *ftl)
932 {
933  struct ftl_zone *zone;
934 
935  int sector_num;
936  uint16_t write_sector;
937  int zone_num = ftl->cache_zone;
938  int block_num;
939 
940  if (ftl->cache_clean)
941  return 0;
942 
943  if (ftl->unstable)
944  return -EIO;
945 
946  BUG_ON(zone_num < 0);
947  zone = &ftl->zones[zone_num];
948  block_num = zone->lba_to_phys_table[ftl->cache_block];
949 
950 
951  /* Try to read all unread areas of the cache block*/
952  for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
953  ftl->block_size / SM_SECTOR_SIZE) {
954 
955  if (!sm_read_sector(ftl,
956  zone_num, block_num, sector_num * SM_SECTOR_SIZE,
957  ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
958  clear_bit(sector_num,
959  &ftl->cache_data_invalid_bitmap);
960  }
961 restart:
962 
963  if (ftl->unstable)
964  return -EIO;
965 
966  /* If there are no spare blocks, */
967  /* we could still continue by erasing/writing the current block,
968  but for such worn-out media it isn't worth the trouble
969  and the danger */
970  if (kfifo_out(&zone->free_sectors,
971  (unsigned char *)&write_sector, 2) != 2) {
972  dbg("no free sectors for write!");
973  return -EIO;
974  }
975 
976 
977  if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
978  ftl->cache_block, ftl->cache_data_invalid_bitmap))
979  goto restart;
980 
981  /* Update the FTL table */
982  zone->lba_to_phys_table[ftl->cache_block] = write_sector;
983 
984  /* Write successful, so erase and free the old block */
985  if (block_num > 0)
986  sm_erase_block(ftl, zone_num, block_num, 1);
987 
988  sm_cache_init(ftl);
989  return 0;
990 }
991 
992 
993 /* flush timer, runs a second after last write */
994 static void sm_cache_flush_timer(unsigned long data)
995 {
996  struct sm_ftl *ftl = (struct sm_ftl *)data;
997  queue_work(cache_flush_workqueue, &ftl->flush_work);
998 }
999 
1000 /* cache flush work, kicked by timer */
1001 static void sm_cache_flush_work(struct work_struct *work)
1002 {
1003  struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
1004  mutex_lock(&ftl->mutex);
1005  sm_cache_flush(ftl);
1006  mutex_unlock(&ftl->mutex);
1007  return;
1008 }
1009 
1010 /* ---------------- outside interface -------------------------------------- */
1011 
1012 /* outside interface: read a sector */
1013 static int sm_read(struct mtd_blktrans_dev *dev,
1014  unsigned long sect_no, char *buf)
1015 {
1016  struct sm_ftl *ftl = dev->priv;
1017  struct ftl_zone *zone;
1018  int error = 0, in_cache = 0;
1019  int zone_num, block, boffset;
1020 
1021  sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
1022  mutex_lock(&ftl->mutex);
1023 
1024 
1025  zone = sm_get_zone(ftl, zone_num);
1026  if (IS_ERR(zone)) {
1027  error = PTR_ERR(zone);
1028  goto unlock;
1029  }
1030 
1031  /* Have to look at cache first */
1032  if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
1033  in_cache = 1;
1034  if (!sm_cache_get(ftl, buf, boffset))
1035  goto unlock;
1036  }
1037 
1038  /* Translate the block and return if doesn't exist in the table */
1039  block = zone->lba_to_phys_table[block];
1040 
1041  if (block == -1) {
1042  memset(buf, 0xFF, SM_SECTOR_SIZE);
1043  goto unlock;
1044  }
1045 
1046  if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
1047  error = -EIO;
1048  goto unlock;
1049  }
1050 
1051  if (in_cache)
1052  sm_cache_put(ftl, buf, boffset);
1053 unlock:
1054  mutex_unlock(&ftl->mutex);
1055  return error;
1056 }
1057 
1058 /* outside interface: write a sector */
1059 static int sm_write(struct mtd_blktrans_dev *dev,
1060  unsigned long sec_no, char *buf)
1061 {
1062  struct sm_ftl *ftl = dev->priv;
1063  struct ftl_zone *zone;
1064  int error = 0, zone_num, block, boffset;
1065 
1066  BUG_ON(ftl->readonly);
1067  sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
1068 
1069  /* No need for the cache flush timer to run now */
1070  del_timer(&ftl->timer);
1071  mutex_lock(&ftl->mutex);
1072 
1073  zone = sm_get_zone(ftl, zone_num);
1074  if (IS_ERR(zone)) {
1075  error = PTR_ERR(zone);
1076  goto unlock;
1077  }
1078 
1079  /* If entry is not in cache, flush it */
1080  if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
1081 
1082  error = sm_cache_flush(ftl);
1083  if (error)
1084  goto unlock;
1085 
1086  ftl->cache_block = block;
1087  ftl->cache_zone = zone_num;
1088  }
1089 
1090  sm_cache_put(ftl, buf, boffset);
1091 unlock:
1092  mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
1093  mutex_unlock(&ftl->mutex);
1094  return error;
1095 }
1096 
1097 /* outside interface: flush everything */
1098 static int sm_flush(struct mtd_blktrans_dev *dev)
1099 {
1100  struct sm_ftl *ftl = dev->priv;
1101  int retval;
1102 
1103  mutex_lock(&ftl->mutex);
1104  retval = sm_cache_flush(ftl);
1105  mutex_unlock(&ftl->mutex);
1106  return retval;
1107 }
1108 
1109 /* outside interface: device is released */
1110 static int sm_release(struct mtd_blktrans_dev *dev)
1111 {
1112  struct sm_ftl *ftl = dev->priv;
1113 
1114  mutex_lock(&ftl->mutex);
1115  del_timer_sync(&ftl->timer);
1116  cancel_work_sync(&ftl->flush_work);
1117  sm_cache_flush(ftl);
1118  mutex_unlock(&ftl->mutex);
1119  return 0;
1120 }
1121 
1122 /* outside interface: get geometry */
1123 static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1124 {
1125  struct sm_ftl *ftl = dev->priv;
1126  geo->heads = ftl->heads;
1127  geo->sectors = ftl->sectors;
1128  geo->cylinders = ftl->cylinders;
1129  return 0;
1130 }
1131 
1132 /* external interface: main initialization function */
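/*
 * The capacity exported to the block layer is block_size * max_lba *
 * zone_count bytes, expressed in 512-byte sectors (>> 9); the remaining
 * zone_size - max_lba blocks of each zone serve as the spare/free blocks
 * used by the flush path.
 */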
1133 static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1134 {
1135  struct mtd_blktrans_dev *trans;
1136  struct sm_ftl *ftl;
1137 
1138  /* Allocate & initialize our private structure */
1139  ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
1140  if (!ftl)
1141  goto error1;
1142 
1143 
1144  mutex_init(&ftl->mutex);
1145  setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
1146  INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1147  init_completion(&ftl->erase_completion);
1148 
1149  /* Read media information */
1150  if (sm_get_media_info(ftl, mtd)) {
1151  dbg("found unsupported mtd device, aborting");
1152  goto error2;
1153  }
1154 
1155 
1156  /* Allocate temporary CIS buffer for read retry support */
1157  ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1158  if (!ftl->cis_buffer)
1159  goto error2;
1160 
1161  /* Allocate zone array, it will be initialized on demand */
1162  ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
1163  GFP_KERNEL);
1164  if (!ftl->zones)
1165  goto error3;
1166 
1167  /* Allocate the cache*/
1168  ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1169 
1170  if (!ftl->cache_data)
1171  goto error4;
1172 
1173  sm_cache_init(ftl);
1174 
1175 
1176  /* Allocate upper layer structure and initialize it */
1177  trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
1178  if (!trans)
1179  goto error5;
1180 
1181  ftl->trans = trans;
1182  trans->priv = ftl;
1183 
1184  trans->tr = tr;
1185  trans->mtd = mtd;
1186  trans->devnum = -1;
1187  trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1188  trans->readonly = ftl->readonly;
1189 
1190  if (sm_find_cis(ftl)) {
1191  dbg("CIS not found on mtd device, aborting");
1192  goto error6;
1193  }
1194 
1195  ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1196  if (!ftl->disk_attributes)
1197  goto error6;
1198  trans->disk_attributes = ftl->disk_attributes;
1199 
1200  sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1201  (int)(mtd->size / (1024 * 1024)), mtd->index);
1202 
1203  dbg("FTL layout:");
1204  dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1205  ftl->zone_count, ftl->max_lba,
1206  ftl->zone_size - ftl->max_lba);
1207  dbg("each block consists of %d bytes",
1208  ftl->block_size);
1209 
1210 
1211  /* Register device*/
1212  if (add_mtd_blktrans_dev(trans)) {
1213  dbg("error in mtdblktrans layer");
1214  goto error6;
1215  }
1216  return;
1217 error6:
1218  kfree(trans);
1219 error5:
1220  kfree(ftl->cache_data);
1221 error4:
1222  kfree(ftl->zones);
1223 error3:
1224  kfree(ftl->cis_buffer);
1225 error2:
1226  kfree(ftl);
1227 error1:
1228  return;
1229 }
1230 
1231 /* main interface: device {surprise,} removal */
1232 static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1233 {
1234  struct sm_ftl *ftl = dev->priv;
1235  int i;
1236 
1237  del_mtd_blktrans_dev(dev);
1238  ftl->trans = NULL;
1239 
1240  for (i = 0 ; i < ftl->zone_count; i++) {
1241 
1242  if (!ftl->zones[i].initialized)
1243  continue;
1244 
1245  kfree(ftl->zones[i].lba_to_phys_table);
1246  kfifo_free(&ftl->zones[i].free_sectors);
1247  }
1248 
1249  sm_delete_sysfs_attributes(ftl);
1250  kfree(ftl->cis_buffer);
1251  kfree(ftl->zones);
1252  kfree(ftl->cache_data);
1253  kfree(ftl);
1254 }
1255 
1256 static struct mtd_blktrans_ops sm_ftl_ops = {
1257  .name = "smblk",
1258  .major = 0,
1259  .part_bits = SM_FTL_PARTN_BITS,
1260  .blksize = SM_SECTOR_SIZE,
1261  .getgeo = sm_getgeo,
1262 
1263  .add_mtd = sm_add_mtd,
1264  .remove_dev = sm_remove_dev,
1265 
1266  .readsect = sm_read,
1267  .writesect = sm_write,
1268 
1269  .flush = sm_flush,
1270  .release = sm_release,
1271 
1272  .owner = THIS_MODULE,
1273 };
1274 
1275 static __init int sm_module_init(void)
1276 {
1277  int error = 0;
1278  cache_flush_workqueue = create_freezable_workqueue("smflush");
1279 
1280  if (!cache_flush_workqueue) /* create_freezable_workqueue() returns NULL on failure */
1281  return -ENOMEM;
1282 
1283  error = register_mtd_blktrans(&sm_ftl_ops);
1284  if (error)
1285  destroy_workqueue(cache_flush_workqueue);
1286  return error;
1287 
1288 }
1289 
1290 static void __exit sm_module_exit(void)
1291 {
1292  destroy_workqueue(cache_flush_workqueue);
1293  deregister_mtd_blktrans(&sm_ftl_ops);
1294 }
1295 
1296 module_init(sm_module_init);
1297 module_exit(sm_module_exit);
1298 
1299 MODULE_LICENSE("GPL");
1300 MODULE_AUTHOR("Maxim Levitsky <[email protected]>");
1301 MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");