Linux Kernel 3.7.1
onenand_base.c
1 /*
2  * linux/drivers/mtd/onenand/onenand_base.c
3  *
4  * Copyright © 2005-2009 Samsung Electronics
5  * Copyright © 2007 Nokia Corporation
6  *
7  * Kyungmin Park <[email protected]>
8  *
9  * Credits:
10  * Adrian Hunter <[email protected]>:
11  * auto-placement support, read-while load support, various fixes
12  *
13  * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
14  * Flex-OneNAND support
15  * Amul Kumar Saha <amul.saha at samsung.com>
16  * OTP support
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License version 2 as
20  * published by the Free Software Foundation.
21  */
22 
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/sched.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/jiffies.h>
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/onenand.h>
34 #include <linux/mtd/partitions.h>
35 
36 #include <asm/io.h>
37 
38 /*
39  * Multiblock erase if number of blocks to erase is 2 or more.
40  * Maximum number of blocks for simultaneous erase is 64.
41  */
42 #define MB_ERASE_MIN_BLK_COUNT 2
43 #define MB_ERASE_MAX_BLK_COUNT 64
44 
45 /* Default Flex-OneNAND boundary and lock respectively */
46 static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
47 
48 module_param_array(flex_bdry, int, NULL, 0400);
49 MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
50  "Syntax:flex_bdry=DIE_BDRY,LOCK,..."
51  "DIE_BDRY: SLC boundary of the die"
52  "LOCK: Locking information for SLC boundary"
53  " : 0->Set boundary in unlocked status"
54  " : 1->Set boundary in locked status");
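/*
 * Illustrative usage (hypothetical values, not taken from this file):
 * booting with "flex_bdry=127,1,-1,0" would request an SLC boundary at
 * block 127 of die 0, locked, and leave die 1 untouched (-1, the default,
 * keeps the current boundary). The array is only consumed later in this
 * driver when the Flex-OneNAND boundary is (re)configured.
 */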
55 
56 /* Default OneNAND/Flex-OneNAND OTP options*/
57 static int otp;
58 
59 module_param(otp, int, 0400);
60 MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
61  "Syntax : otp=LOCK_TYPE"
62  "LOCK_TYPE : Keys issued, for specific OTP Lock type"
63  " : 0 -> Default (No Blocks Locked)"
64  " : 1 -> OTP Block lock"
65  " : 2 -> 1st Block lock"
66  " : 3 -> BOTH OTP Block and 1st Block lock");
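/*
 * Illustrative usage (hypothetical): "otp=1" asks the OTP handling code
 * further down in this driver to lock the OTP block itself, "otp=2" the
 * 1st block, and "otp=3" both; the default 0 locks nothing.
 */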
67 
68 /*
69  * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
70  * For now, we expose only 64 out of 80 ecc bytes
71  */
72 static struct nand_ecclayout flexonenand_oob_128 = {
73  .eccbytes = 64,
74  .eccpos = {
75  6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
76  22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
77  38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
78  54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
79  70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80  86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
81  102, 103, 104, 105
82  },
83  .oobfree = {
84  {2, 4}, {18, 4}, {34, 4}, {50, 4},
85  {66, 4}, {82, 4}, {98, 4}, {114, 4}
86  }
87 };
88 
89 /*
90  * onenand_oob_128 - oob info for OneNAND with 4KB page
91  *
92  * Based on specification:
93  * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
94  *
95  * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
96  *
97  * oobfree uses the spare area fields marked as
98  * "Managed by internal ECC logic for Logical Sector Number area"
99  */
100 static struct nand_ecclayout onenand_oob_128 = {
101  .eccbytes = 64,
102  .eccpos = {
103  7, 8, 9, 10, 11, 12, 13, 14, 15,
104  23, 24, 25, 26, 27, 28, 29, 30, 31,
105  39, 40, 41, 42, 43, 44, 45, 46, 47,
106  55, 56, 57, 58, 59, 60, 61, 62, 63,
107  71, 72, 73, 74, 75, 76, 77, 78, 79,
108  87, 88, 89, 90, 91, 92, 93, 94, 95,
109  103, 104, 105, 106, 107, 108, 109, 110, 111,
110  119
111  },
112  .oobfree = {
113  {2, 3}, {18, 3}, {34, 3}, {50, 3},
114  {66, 3}, {82, 3}, {98, 3}, {114, 3}
115  }
116 };
117 
121 static struct nand_ecclayout onenand_oob_64 = {
122  .eccbytes = 20,
123  .eccpos = {
124  8, 9, 10, 11, 12,
125  24, 25, 26, 27, 28,
126  40, 41, 42, 43, 44,
127  56, 57, 58, 59, 60,
128  },
129  .oobfree = {
130  {2, 3}, {14, 2}, {18, 3}, {30, 2},
131  {34, 3}, {46, 2}, {50, 3}, {62, 2}
132  }
133 };
134 
138 static struct nand_ecclayout onenand_oob_32 = {
139  .eccbytes = 10,
140  .eccpos = {
141  8, 9, 10, 11, 12,
142  24, 25, 26, 27, 28,
143  },
144  .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} }
145 };
146 
147 static const unsigned char ffchars[] = {
148  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
149  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
150  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
151  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
152  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
153  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
154  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
155  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
156  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
157  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
158  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
159  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
160  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
161  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
162  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
163  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
164 };
165 
172 static unsigned short onenand_readw(void __iomem *addr)
173 {
174  return readw(addr);
175 }
176 
184 static void onenand_writew(unsigned short value, void __iomem *addr)
185 {
186  writew(value, addr);
187 }
188 
197 static int onenand_block_address(struct onenand_chip *this, int block)
198 {
199  /* Device Flash Core select, NAND Flash Block Address */
200  if (block & this->density_mask)
201  return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
202 
203  return block;
204 }
205 
214 static int onenand_bufferram_address(struct onenand_chip *this, int block)
215 {
216  /* Device BufferRAM Select */
217  if (block & this->density_mask)
218  return ONENAND_DDP_CHIP1;
219 
220  return ONENAND_DDP_CHIP0;
221 }
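/*
 * Note on the two helpers above: for a dual-die (DDP) device,
 * density_mask equals the number of blocks per die, so any block index
 * with that bit set lives on chip 1. onenand_block_address() folds the
 * index back into the die (block ^ density_mask) and adds the DFS bit,
 * while onenand_bufferram_address() merely selects which die's BufferRAM
 * is addressed. E.g. (hypothetical 1024-blocks-per-die part): block 1030
 * yields DFS=1, FBA=6.
 */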
222 
231 static int onenand_page_address(int page, int sector)
232 {
233  /* Flash Page Address, Flash Sector Address */
234  int fpa, fsa;
235 
236  fpa = page & ONENAND_FPA_MASK;
237  fsa = sector & ONENAND_FSA_MASK;
238 
239  return ((fpa << ONENAND_FPA_SHIFT) | fsa);
240 }
241 
251 static int onenand_buffer_address(int dataram1, int sectors, int count)
252 {
253  int bsa, bsc;
254 
255  /* BufferRAM Sector Address */
256  bsa = sectors & ONENAND_BSA_MASK;
257 
258  if (dataram1)
259  bsa |= ONENAND_BSA_DATARAM1; /* DataRAM1 */
260  else
261  bsa |= ONENAND_BSA_DATARAM0; /* DataRAM0 */
262 
263  /* BufferRAM Sector Count */
264  bsc = count & ONENAND_BSC_MASK;
265 
266  return ((bsa << ONENAND_BSA_SHIFT) | bsc);
267 }
268 
274 static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
275 {
276  unsigned boundary, blk, die = 0;
277 
278  if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
279  die = 1;
280  addr -= this->diesize[0];
281  }
282 
283  boundary = this->boundary[die];
284 
285  blk = addr >> (this->erase_shift - 1);
286  if (blk > boundary)
287  blk = (blk + boundary + 1) >> 1;
288 
289  blk += die ? this->density_mask : 0;
290  return blk;
291 }
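/*
 * Worked example for flexonenand_block() (hypothetical layout): with
 * erase_shift = 19 (512KiB MLC blocks, hence 256KiB SLC blocks) and
 * boundary[0] = 3 (blocks 0-3 are SLC), addr = 0x180000 gives
 * blk = 0x180000 >> 18 = 6; since 6 > 3 it is folded into the MLC area
 * as blk = (6 + 3 + 1) >> 1 = 5. flexonenand_addr() below performs the
 * inverse mapping.
 */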
292 
293 inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
294 {
295  if (!FLEXONENAND(this))
296  return addr >> this->erase_shift;
297  return flexonenand_block(this, addr);
298 }
299 
307 static loff_t flexonenand_addr(struct onenand_chip *this, int block)
308 {
309  loff_t ofs = 0;
310  int die = 0, boundary;
311 
312  if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
313  block -= this->density_mask;
314  die = 1;
315  ofs = this->diesize[0];
316  }
317 
318  boundary = this->boundary[die];
319  ofs += (loff_t)block << (this->erase_shift - 1);
320  if (block > (boundary + 1))
321  ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
322  return ofs;
323 }
324 
325 loff_t onenand_addr(struct onenand_chip *this, int block)
326 {
327  if (!FLEXONENAND(this))
328  return (loff_t)block << this->erase_shift;
329  return flexonenand_addr(this, block);
330 }
331 EXPORT_SYMBOL(onenand_addr);
332
339 static inline int onenand_get_density(int dev_id)
340 {
341  int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
342  return (density & ONENAND_DEVICE_DENSITY_MASK);
343 }
344 
350 int flexonenand_region(struct mtd_info *mtd, loff_t addr)
351 {
352  int i;
353 
354  for (i = 0; i < mtd->numeraseregions; i++)
355  if (addr < mtd->eraseregions[i].offset)
356  break;
357  return i - 1;
358 }
359 EXPORT_SYMBOL(flexonenand_region);
360
371 static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
372 {
373  struct onenand_chip *this = mtd->priv;
374  int value, block, page;
375 
376  /* Address translation */
377  switch (cmd) {
378  case ONENAND_CMD_UNLOCK:
379  case ONENAND_CMD_LOCK:
380  case ONENAND_CMD_LOCK_TIGHT:
381  case ONENAND_CMD_UNLOCK_ALL:
382  block = -1;
383  page = -1;
384  break;
385 
386  case FLEXONENAND_CMD_PI_ACCESS:
387  /* addr contains die index */
388  block = addr * this->density_mask;
389  page = -1;
390  break;
391 
392  case ONENAND_CMD_ERASE:
393  case ONENAND_CMD_MULTIBLOCK_ERASE:
394  case ONENAND_CMD_ERASE_VERIFY:
395  case ONENAND_CMD_BUFFERRAM:
396  case ONENAND_CMD_OTP_ACCESS:
397  block = onenand_block(this, addr);
398  page = -1;
399  break;
400 
401  case FLEXONENAND_CMD_READ_PI:
402  cmd = ONENAND_CMD_READ;
403  block = addr * this->density_mask;
404  page = 0;
405  break;
406 
407  default:
408  block = onenand_block(this, addr);
409  if (FLEXONENAND(this))
410  page = (int) (addr - onenand_addr(this, block))>>\
411  this->page_shift;
412  else
413  page = (int) (addr >> this->page_shift);
414  if (ONENAND_IS_2PLANE(this)) {
415  /* Make the even block number */
416  block &= ~1;
417  /* Is it the odd plane? */
418  if (addr & this->writesize)
419  block++;
420  page >>= 1;
421  }
422  page &= this->page_mask;
423  break;
424  }
425 
426  /* NOTE: The setting order of the registers is very important! */
427  if (cmd == ONENAND_CMD_BUFFERRAM) {
428  /* Select DataRAM for DDP */
429  value = onenand_bufferram_address(this, block);
430  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
431 
432  if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
433  /* It is always BufferRAM0 */
434  ONENAND_SET_BUFFERRAM0(this);
435  else
436  /* Switch to the next data buffer */
437  ONENAND_SET_NEXT_BUFFERRAM(this);
438
439  return 0;
440  }
441 
442  if (block != -1) {
443  /* Write 'DFS, FBA' of Flash */
444  value = onenand_block_address(this, block);
445  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
446 
447  /* Select DataRAM for DDP */
448  value = onenand_bufferram_address(this, block);
449  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
450  }
451 
452  if (page != -1) {
453  /* Now we use page size operation */
454  int sectors = 0, count = 0;
455  int dataram;
456 
457  switch (cmd) {
458  case FLEXONENAND_CMD_RECOVER_LSB:
459  case ONENAND_CMD_READ:
460  case ONENAND_CMD_READOOB:
461  if (ONENAND_IS_4KB_PAGE(this))
462  /* It is always BufferRAM0 */
463  dataram = ONENAND_SET_BUFFERRAM0(this);
464  else
465  dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
466  break;
467 
468  default:
469  if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
470  cmd = ONENAND_CMD_2X_PROG;
471  dataram = ONENAND_CURRENT_BUFFERRAM(this);
472  break;
473  }
474 
475  /* Write 'FPA, FSA' of Flash */
476  value = onenand_page_address(page, sectors);
477  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS8);
478 
479  /* Write 'BSA, BSC' of DataRAM */
480  value = onenand_buffer_address(dataram, sectors, count);
481  this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
482  }
483 
484  /* Interrupt clear */
485  this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
486
487  /* Write command */
488  this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
489 
490  return 0;
491 }
492 
497 static inline int onenand_read_ecc(struct onenand_chip *this)
498 {
499  int ecc, i, result = 0;
500 
501  if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
502  return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
503 
504  for (i = 0; i < 4; i++) {
505  ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
506  if (likely(!ecc))
507  continue;
508  if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
509  return ONENAND_ECC_2BIT_ALL;
510  else
511  result = ONENAND_ECC_1BIT_ALL;
512  }
513 
514  return result;
515 }
516 
526 static int onenand_wait(struct mtd_info *mtd, int state)
527 {
528  struct onenand_chip * this = mtd->priv;
529  unsigned long timeout;
530  unsigned int flags = ONENAND_INT_MASTER;
531  unsigned int interrupt = 0;
532  unsigned int ctrl;
533 
534  /* The 20 msec is enough */
535  timeout = jiffies + msecs_to_jiffies(20);
536  while (time_before(jiffies, timeout)) {
537  interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
538 
539  if (interrupt & flags)
540  break;
541 
542  if (state != FL_READING && state != FL_PREPARING_ERASE)
543  cond_resched();
544  }
545  /* To get correct interrupt status in timeout case */
546  interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
547 
548  ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
549 
550  /*
551  * In the Spec. it checks the controller status first
552  * However if you get the correct information in case of
553  * power off recovery (POR) test, it should read ECC status first
554  */
555  if (interrupt & ONENAND_INT_READ) {
556  int ecc = onenand_read_ecc(this);
557  if (ecc) {
558  if (ecc & ONENAND_ECC_2BIT_ALL) {
559  printk(KERN_ERR "%s: ECC error = 0x%04x\n",
560  __func__, ecc);
561  mtd->ecc_stats.failed++;
562  return -EBADMSG;
563  } else if (ecc & ONENAND_ECC_1BIT_ALL) {
564  printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
565  __func__, ecc);
566  mtd->ecc_stats.corrected++;
567  }
568  }
569  } else if (state == FL_READING) {
570  printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
571  __func__, ctrl, interrupt);
572  return -EIO;
573  }
574 
575  if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
576  printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
577  __func__, ctrl, interrupt);
578  return -EIO;
579  }
580 
581  if (!(interrupt & ONENAND_INT_MASTER)) {
582  printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
583  __func__, ctrl, interrupt);
584  return -EIO;
585  }
586 
587  /* If there's controller error, it's a real error */
588  if (ctrl & ONENAND_CTRL_ERROR) {
589  printk(KERN_ERR "%s: controller error = 0x%04x\n",
590  __func__, ctrl);
591  if (ctrl & ONENAND_CTRL_LOCK)
592  printk(KERN_ERR "%s: it's locked error.\n", __func__);
593  return -EIO;
594  }
595 
596  return 0;
597 }
598 
599 /*
600  * onenand_interrupt - [DEFAULT] onenand interrupt handler
601  * @param irq onenand interrupt number
602  * @param dev_id interrupt data
603  *
604  * complete the work
605  */
606 static irqreturn_t onenand_interrupt(int irq, void *data)
607 {
608  struct onenand_chip *this = data;
609 
610  /* To handle shared interrupt */
611  if (!this->complete.done)
612  complete(&this->complete);
613 
614  return IRQ_HANDLED;
615 }
616 
617 /*
618  * onenand_interrupt_wait - [DEFAULT] wait until the command is done
619  * @param mtd MTD device structure
620  * @param state state to select the max. timeout value
621  *
622  * Wait for command done.
623  */
624 static int onenand_interrupt_wait(struct mtd_info *mtd, int state)
625 {
626  struct onenand_chip *this = mtd->priv;
627 
628  wait_for_completion(&this->complete);
629
630  return onenand_wait(mtd, state);
631 }
632 
633 /*
634  * onenand_try_interrupt_wait - [DEFAULT] try interrupt wait
635  * @param mtd MTD device structure
636  * @param state state to select the max. timeout value
637  *
638  * Try interrupt based wait (It is used one-time)
639  */
640 static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state)
641 {
642  struct onenand_chip *this = mtd->priv;
643  unsigned long remain, timeout;
644 
645  /* We use interrupt wait first */
646  this->wait = onenand_interrupt_wait;
647 
648  timeout = msecs_to_jiffies(100);
649  remain = wait_for_completion_timeout(&this->complete, timeout);
650  if (!remain) {
651  printk(KERN_INFO "OneNAND: There's no interrupt. "
652  "We use the normal wait\n");
653 
654  /* Release the irq */
655  free_irq(this->irq, this);
656 
657  this->wait = onenand_wait;
658  }
659 
660  return onenand_wait(mtd, state);
661 }
662 
663 /*
664  * onenand_setup_wait - [OneNAND Interface] setup onenand wait method
665  * @param mtd MTD device structure
666  *
667  * There's two method to wait onenand work
668  * 1. polling - read interrupt status register
669  * 2. interrupt - use the kernel interrupt method
670  */
671 static void onenand_setup_wait(struct mtd_info *mtd)
672 {
673  struct onenand_chip *this = mtd->priv;
674  int syscfg;
675 
676  init_completion(&this->complete);
677 
678  if (this->irq <= 0) {
679  this->wait = onenand_wait;
680  return;
681  }
682 
683  if (request_irq(this->irq, &onenand_interrupt,
684  IRQF_SHARED, "onenand", this)) {
685  /* If we can't get irq, use the normal wait */
686  this->wait = onenand_wait;
687  return;
688  }
689 
690  /* Enable interrupt */
691  syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
692  syscfg |= ONENAND_SYS_CFG1_IOBE;
693  this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
694 
695  this->wait = onenand_try_interrupt_wait;
696 }
697 
706 static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
707 {
708  struct onenand_chip *this = mtd->priv;
709 
710  if (ONENAND_CURRENT_BUFFERRAM(this)) {
711  /* Note: the 'this->writesize' is a real page size */
712  if (area == ONENAND_DATARAM)
713  return this->writesize;
714  if (area == ONENAND_SPARERAM)
715  return mtd->oobsize;
716  }
717 
718  return 0;
719 }
720 
731 static int onenand_read_bufferram(struct mtd_info *mtd, int area,
732  unsigned char *buffer, int offset, size_t count)
733 {
734  struct onenand_chip *this = mtd->priv;
735  void __iomem *bufferram;
736 
737  bufferram = this->base + area;
738 
739  bufferram += onenand_bufferram_offset(mtd, area);
740 
741  if (ONENAND_CHECK_BYTE_ACCESS(count)) {
742  unsigned short word;
743 
744  /* Align with word(16-bit) size */
745  count--;
746 
747  /* Read word and save byte */
748  word = this->read_word(bufferram + offset + count);
749  buffer[count] = (word & 0xff);
750  }
751 
752  memcpy(buffer, bufferram + offset, count);
753 
754  return 0;
755 }
756 
767 static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area,
768  unsigned char *buffer, int offset, size_t count)
769 {
770  struct onenand_chip *this = mtd->priv;
771  void __iomem *bufferram;
772 
773  bufferram = this->base + area;
774 
775  bufferram += onenand_bufferram_offset(mtd, area);
776 
777  this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
778
779  if (ONENAND_CHECK_BYTE_ACCESS(count)) {
780  unsigned short word;
781 
782  /* Align with word(16-bit) size */
783  count--;
784 
785  /* Read word and save byte */
786  word = this->read_word(bufferram + offset + count);
787  buffer[count] = (word & 0xff);
788  }
789 
790  memcpy(buffer, bufferram + offset, count);
791 
792  this->mmcontrol(mtd, 0);
793 
794  return 0;
795 }
796 
807 static int onenand_write_bufferram(struct mtd_info *mtd, int area,
808  const unsigned char *buffer, int offset, size_t count)
809 {
810  struct onenand_chip *this = mtd->priv;
811  void __iomem *bufferram;
812 
813  bufferram = this->base + area;
814 
815  bufferram += onenand_bufferram_offset(mtd, area);
816 
817  if (ONENAND_CHECK_BYTE_ACCESS(count)) {
818  unsigned short word;
819  int byte_offset;
820 
821  /* Align with word(16-bit) size */
822  count--;
823 
824  /* Calculate byte access offset */
825  byte_offset = offset + count;
826 
827  /* Read word and save byte */
828  word = this->read_word(bufferram + byte_offset);
829  word = (word & ~0xff) | buffer[count];
830  this->write_word(word, bufferram + byte_offset);
831  }
832 
833  memcpy(bufferram + offset, buffer, count);
834 
835  return 0;
836 }
837 
846 static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
847 {
848  struct onenand_chip *this = mtd->priv;
849  int blockpage, block, page;
850 
851  /* Calculate the even block number */
852  block = (int) (addr >> this->erase_shift) & ~1;
853  /* Is it the odd plane? */
854  if (addr & this->writesize)
855  block++;
856  page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
857  blockpage = (block << 7) | page;
858 
859  return blockpage;
860 }
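/*
 * Note on onenand_get_2x_blockpage(): the returned key packs the
 * (evened) block number and the halved page index as (block << 7) | page,
 * i.e. a 7-bit page field, which is sufficient for these parts. The key
 * is only compared against bufferram[].blockpage by the helpers below,
 * so it never has to be a real flash address.
 */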
861 
870 static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
871 {
872  struct onenand_chip *this = mtd->priv;
873  int blockpage, found = 0;
874  unsigned int i;
875 
876  if (ONENAND_IS_2PLANE(this))
877  blockpage = onenand_get_2x_blockpage(mtd, addr);
878  else
879  blockpage = (int) (addr >> this->page_shift);
880 
881  /* Is there valid data? */
882  i = ONENAND_CURRENT_BUFFERRAM(this);
883  if (this->bufferram[i].blockpage == blockpage)
884  found = 1;
885  else {
886  /* Check another BufferRAM */
887  i = ONENAND_NEXT_BUFFERRAM(this);
888  if (this->bufferram[i].blockpage == blockpage) {
889  ONENAND_SET_NEXT_BUFFERRAM(this);
890  found = 1;
891  }
892  }
893 
894  if (found && ONENAND_IS_DDP(this)) {
895  /* Select DataRAM for DDP */
896  int block = onenand_block(this, addr);
897  int value = onenand_bufferram_address(this, block);
898  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
899  }
900 
901  return found;
902 }
903 
912 static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
913  int valid)
914 {
915  struct onenand_chip *this = mtd->priv;
916  int blockpage;
917  unsigned int i;
918 
919  if (ONENAND_IS_2PLANE(this))
920  blockpage = onenand_get_2x_blockpage(mtd, addr);
921  else
922  blockpage = (int) (addr >> this->page_shift);
923 
924  /* Invalidate another BufferRAM */
925  i = ONENAND_NEXT_BUFFERRAM(this);
926  if (this->bufferram[i].blockpage == blockpage)
927  this->bufferram[i].blockpage = -1;
928 
929  /* Update BufferRAM */
930  i = ONENAND_CURRENT_BUFFERRAM(this);
931  if (valid)
932  this->bufferram[i].blockpage = blockpage;
933  else
934  this->bufferram[i].blockpage = -1;
935 }
936 
945 static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
946  unsigned int len)
947 {
948  struct onenand_chip *this = mtd->priv;
949  int i;
950  loff_t end_addr = addr + len;
951 
952  /* Invalidate BufferRAM */
953  for (i = 0; i < MAX_BUFFERRAM; i++) {
954  loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;
955  if (buf_addr >= addr && buf_addr < end_addr)
956  this->bufferram[i].blockpage = -1;
957  }
958 }
959 
967 static int onenand_get_device(struct mtd_info *mtd, int new_state)
968 {
969  struct onenand_chip *this = mtd->priv;
970  DECLARE_WAITQUEUE(wait, current);
971
972  /*
973  * Grab the lock and see if the device is available
974  */
975  while (1) {
976  spin_lock(&this->chip_lock);
977  if (this->state == FL_READY) {
978  this->state = new_state;
979  spin_unlock(&this->chip_lock);
980  if (new_state != FL_PM_SUSPENDED && this->enable)
981  this->enable(mtd);
982  break;
983  }
984  if (new_state == FL_PM_SUSPENDED) {
985  spin_unlock(&this->chip_lock);
986  return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
987  }
988  set_current_state(TASK_UNINTERRUPTIBLE);
989  add_wait_queue(&this->wq, &wait);
990  spin_unlock(&this->chip_lock);
991  schedule();
992  remove_wait_queue(&this->wq, &wait);
993  }
994 
995  return 0;
996 }
997 
1004 static void onenand_release_device(struct mtd_info *mtd)
1005 {
1006  struct onenand_chip *this = mtd->priv;
1007 
1008  if (this->state != FL_PM_SUSPENDED && this->disable)
1009  this->disable(mtd);
1010  /* Release the chip */
1011  spin_lock(&this->chip_lock);
1012  this->state = FL_READY;
1013  wake_up(&this->wq);
1014  spin_unlock(&this->chip_lock);
1015 }
1016 
1024 static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
1025  int thislen)
1026 {
1027  struct onenand_chip *this = mtd->priv;
1028  struct nand_oobfree *free;
1029  int readcol = column;
1030  int readend = column + thislen;
1031  int lastgap = 0;
1032  unsigned int i;
1033  uint8_t *oob_buf = this->oob_buf;
1034 
1035  free = this->ecclayout->oobfree;
1036  for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1037  if (readcol >= lastgap)
1038  readcol += free->offset - lastgap;
1039  if (readend >= lastgap)
1040  readend += free->offset - lastgap;
1041  lastgap = free->offset + free->length;
1042  }
1043  this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
1044  free = this->ecclayout->oobfree;
1045  for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1046  int free_end = free->offset + free->length;
1047  if (free->offset < readend && free_end > readcol) {
1048  int st = max_t(int,free->offset,readcol);
1049  int ed = min_t(int,free_end,readend);
1050  int n = ed - st;
1051  memcpy(buf, oob_buf + st, n);
1052  buf += n;
1053  } else if (column == 0)
1054  break;
1055  }
1056  return 0;
1057 }
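/*
 * Example for onenand_transfer_auto_oob() with the onenand_oob_64 layout
 * above (free bytes at {2,3}, {14,2}, {18,3}, ...): a request for
 * column 0, length 5 in MTD_OPS_AUTO_OOB mode is remapped to raw OOB
 * bytes 2-4 followed by 14-15, so the caller sees the free bytes as one
 * contiguous run.
 */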
1058 
1072 static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
1073 {
1074  struct onenand_chip *this = mtd->priv;
1075  int i;
1076 
1077  /* Recovery is only for Flex-OneNAND */
1078  if (!FLEXONENAND(this))
1079  return status;
1080 
1081  /* check if we failed due to uncorrectable error */
1082  if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
1083  return status;
1084 
1085  /* check if address lies in MLC region */
1086  i = flexonenand_region(mtd, addr);
1087  if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
1088  return status;
1089 
1090  /* We are attempting to reread, so decrement stats.failed
1091  * which was incremented by onenand_wait due to read failure
1092  */
1093  printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
1094  __func__);
1095  mtd->ecc_stats.failed--;
1096 
1097  /* Issue the LSB page recovery command */
1098  this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
1099  return this->wait(mtd, FL_READING);
1100 }
1101 
1111 static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1112  struct mtd_oob_ops *ops)
1113 {
1114  struct onenand_chip *this = mtd->priv;
1115  struct mtd_ecc_stats stats;
1116  size_t len = ops->len;
1117  size_t ooblen = ops->ooblen;
1118  u_char *buf = ops->datbuf;
1119  u_char *oobbuf = ops->oobbuf;
1120  int read = 0, column, thislen;
1121  int oobread = 0, oobcolumn, thisooblen, oobsize;
1122  int ret = 0;
1123  int writesize = this->writesize;
1124 
1125  pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1126  (int)len);
1127 
1128  if (ops->mode == MTD_OPS_AUTO_OOB)
1129  oobsize = this->ecclayout->oobavail;
1130  else
1131  oobsize = mtd->oobsize;
1132 
1133  oobcolumn = from & (mtd->oobsize - 1);
1134 
1135  /* Do not allow reads past end of device */
1136  if (from + len > mtd->size) {
1137  printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1138  __func__);
1139  ops->retlen = 0;
1140  ops->oobretlen = 0;
1141  return -EINVAL;
1142  }
1143 
1144  stats = mtd->ecc_stats;
1145 
1146  while (read < len) {
1147  cond_resched();
1148 
1149  thislen = min_t(int, writesize, len - read);
1150 
1151  column = from & (writesize - 1);
1152  if (column + thislen > writesize)
1153  thislen = writesize - column;
1154 
1155  if (!onenand_check_bufferram(mtd, from)) {
1156  this->command(mtd, ONENAND_CMD_READ, from, writesize);
1157 
1158  ret = this->wait(mtd, FL_READING);
1159  if (unlikely(ret))
1160  ret = onenand_recover_lsb(mtd, from, ret);
1161  onenand_update_bufferram(mtd, from, !ret);
1162  if (mtd_is_eccerr(ret))
1163  ret = 0;
1164  if (ret)
1165  break;
1166  }
1167 
1168  this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1169  if (oobbuf) {
1170  thisooblen = oobsize - oobcolumn;
1171  thisooblen = min_t(int, thisooblen, ooblen - oobread);
1172 
1173  if (ops->mode == MTD_OPS_AUTO_OOB)
1174  onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1175  else
1176  this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
1177  oobread += thisooblen;
1178  oobbuf += thisooblen;
1179  oobcolumn = 0;
1180  }
1181 
1182  read += thislen;
1183  if (read == len)
1184  break;
1185 
1186  from += thislen;
1187  buf += thislen;
1188  }
1189 
1190  /*
1191  * Return success, if no ECC failures, else -EBADMSG
1192  * fs driver will take care of that, because
1193  * retlen == desired len and result == -EBADMSG
1194  */
1195  ops->retlen = read;
1196  ops->oobretlen = oobread;
1197 
1198  if (ret)
1199  return ret;
1200 
1201  if (mtd->ecc_stats.failed - stats.failed)
1202  return -EBADMSG;
1203 
1204  /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
1205  return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1206 }
1207 
1216 static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1217  struct mtd_oob_ops *ops)
1218 {
1219  struct onenand_chip *this = mtd->priv;
1220  struct mtd_ecc_stats stats;
1221  size_t len = ops->len;
1222  size_t ooblen = ops->ooblen;
1223  u_char *buf = ops->datbuf;
1224  u_char *oobbuf = ops->oobbuf;
1225  int read = 0, column, thislen;
1226  int oobread = 0, oobcolumn, thisooblen, oobsize;
1227  int ret = 0, boundary = 0;
1228  int writesize = this->writesize;
1229 
1230  pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1231  (int)len);
1232 
1233  if (ops->mode == MTD_OPS_AUTO_OOB)
1234  oobsize = this->ecclayout->oobavail;
1235  else
1236  oobsize = mtd->oobsize;
1237 
1238  oobcolumn = from & (mtd->oobsize - 1);
1239 
1240  /* Do not allow reads past end of device */
1241  if ((from + len) > mtd->size) {
1242  printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1243  __func__);
1244  ops->retlen = 0;
1245  ops->oobretlen = 0;
1246  return -EINVAL;
1247  }
1248 
1249  stats = mtd->ecc_stats;
1250 
1251  /* Read-while-load method */
1252 
1253  /* Do first load to bufferRAM */
1254  if (read < len) {
1255  if (!onenand_check_bufferram(mtd, from)) {
1256  this->command(mtd, ONENAND_CMD_READ, from, writesize);
1257  ret = this->wait(mtd, FL_READING);
1258  onenand_update_bufferram(mtd, from, !ret);
1259  if (mtd_is_eccerr(ret))
1260  ret = 0;
1261  }
1262  }
1263 
1264  thislen = min_t(int, writesize, len - read);
1265  column = from & (writesize - 1);
1266  if (column + thislen > writesize)
1267  thislen = writesize - column;
1268 
1269  while (!ret) {
1270  /* If there is more to load then start next load */
1271  from += thislen;
1272  if (read + thislen < len) {
1273  this->command(mtd, ONENAND_CMD_READ, from, writesize);
1274  /*
1275  * Chip boundary handling in DDP
1276  * Now we issued chip 1 read and pointed chip 1
1277  * bufferram so we have to point chip 0 bufferram.
1278  */
1279  if (ONENAND_IS_DDP(this) &&
1280  unlikely(from == (this->chipsize >> 1))) {
1281  this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
1282  boundary = 1;
1283  } else
1284  boundary = 0;
1285  ONENAND_SET_PREV_BUFFERRAM(this);
1286  }
1287  /* While load is going, read from last bufferRAM */
1288  this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
1289 
1290  /* Read oob area if needed */
1291  if (oobbuf) {
1292  thisooblen = oobsize - oobcolumn;
1293  thisooblen = min_t(int, thisooblen, ooblen - oobread);
1294 
1295  if (ops->mode == MTD_OPS_AUTO_OOB)
1296  onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1297  else
1298  this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
1299  oobread += thisooblen;
1300  oobbuf += thisooblen;
1301  oobcolumn = 0;
1302  }
1303 
1304  /* See if we are done */
1305  read += thislen;
1306  if (read == len)
1307  break;
1308  /* Set up for next read from bufferRAM */
1309  if (unlikely(boundary))
1310  this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
1311  ONENAND_SET_NEXT_BUFFERRAM(this);
1312  buf += thislen;
1313  thislen = min_t(int, writesize, len - read);
1314  column = 0;
1315  cond_resched();
1316  /* Now wait for load */
1317  ret = this->wait(mtd, FL_READING);
1318  onenand_update_bufferram(mtd, from, !ret);
1319  if (mtd_is_eccerr(ret))
1320  ret = 0;
1321  }
1322 
1323  /*
1324  * Return success, if no ECC failures, else -EBADMSG
1325  * fs driver will take care of that, because
1326  * retlen == desired len and result == -EBADMSG
1327  */
1328  ops->retlen = read;
1329  ops->oobretlen = oobread;
1330 
1331  if (ret)
1332  return ret;
1333 
1334  if (mtd->ecc_stats.failed - stats.failed)
1335  return -EBADMSG;
1336 
1337  /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
1338  return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1339 }
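/*
 * Note on the two read paths above: onenand_mlc_read_ops_nolock() loads
 * a page, waits, then copies, because 4KiB-page (MLC/Flex-OneNAND)
 * devices only use DataRAM0. onenand_read_ops_nolock() pipelines
 * instead: it starts the load of page N+1, copies page N from the other
 * BufferRAM while that load runs, and only then waits -- the
 * "read-while load" scheme credited in the file header.
 */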
1340 
1349 static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1350  struct mtd_oob_ops *ops)
1351 {
1352  struct onenand_chip *this = mtd->priv;
1353  struct mtd_ecc_stats stats;
1354  int read = 0, thislen, column, oobsize;
1355  size_t len = ops->ooblen;
1356  unsigned int mode = ops->mode;
1357  u_char *buf = ops->oobbuf;
1358  int ret = 0, readcmd;
1359 
1360  from += ops->ooboffs;
1361 
1362  pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1363  (int)len);
1364 
1365  /* Initialize return length value */
1366  ops->oobretlen = 0;
1367 
1368  if (mode == MTD_OPS_AUTO_OOB)
1369  oobsize = this->ecclayout->oobavail;
1370  else
1371  oobsize = mtd->oobsize;
1372 
1373  column = from & (mtd->oobsize - 1);
1374 
1375  if (unlikely(column >= oobsize)) {
1376  printk(KERN_ERR "%s: Attempted to start read outside oob\n",
1377  __func__);
1378  return -EINVAL;
1379  }
1380 
1381  /* Do not allow reads past end of device */
1382  if (unlikely(from >= mtd->size ||
1383  column + len > ((mtd->size >> this->page_shift) -
1384  (from >> this->page_shift)) * oobsize)) {
1385  printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
1386  __func__);
1387  return -EINVAL;
1388  }
1389 
1390  stats = mtd->ecc_stats;
1391 
1392  readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1393
1394  while (read < len) {
1395  cond_resched();
1396 
1397  thislen = oobsize - column;
1398  thislen = min_t(int, thislen, len);
1399 
1400  this->command(mtd, readcmd, from, mtd->oobsize);
1401 
1402  onenand_update_bufferram(mtd, from, 0);
1403 
1404  ret = this->wait(mtd, FL_READING);
1405  if (unlikely(ret))
1406  ret = onenand_recover_lsb(mtd, from, ret);
1407 
1408  if (ret && !mtd_is_eccerr(ret)) {
1409  printk(KERN_ERR "%s: read failed = 0x%x\n",
1410  __func__, ret);
1411  break;
1412  }
1413 
1414  if (mode == MTD_OPS_AUTO_OOB)
1415  onenand_transfer_auto_oob(mtd, buf, column, thislen);
1416  else
1417  this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
1418 
1419  read += thislen;
1420 
1421  if (read == len)
1422  break;
1423 
1424  buf += thislen;
1425 
1426  /* Read more? */
1427  if (read < len) {
1428  /* Page size */
1429  from += mtd->writesize;
1430  column = 0;
1431  }
1432  }
1433 
1434  ops->oobretlen = read;
1435 
1436  if (ret)
1437  return ret;
1438 
1439  if (mtd->ecc_stats.failed - stats.failed)
1440  return -EBADMSG;
1441 
1442  return 0;
1443 }
1444 
1455 static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
1456  size_t *retlen, u_char *buf)
1457 {
1458  struct onenand_chip *this = mtd->priv;
1459  struct mtd_oob_ops ops = {
1460  .len = len,
1461  .ooblen = 0,
1462  .datbuf = buf,
1463  .oobbuf = NULL,
1464  };
1465  int ret;
1466 
1467  onenand_get_device(mtd, FL_READING);
1468  ret = ONENAND_IS_4KB_PAGE(this) ?
1469  onenand_mlc_read_ops_nolock(mtd, from, &ops) :
1470  onenand_read_ops_nolock(mtd, from, &ops);
1471  onenand_release_device(mtd);
1472 
1473  *retlen = ops.retlen;
1474  return ret;
1475 }
1476 
1485 static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
1486  struct mtd_oob_ops *ops)
1487 {
1488  struct onenand_chip *this = mtd->priv;
1489  int ret;
1490 
1491  switch (ops->mode) {
1492  case MTD_OPS_PLACE_OOB:
1493  case MTD_OPS_AUTO_OOB:
1494  break;
1495  case MTD_OPS_RAW:
1496  /* Not implemented yet */
1497  default:
1498  return -EINVAL;
1499  }
1500 
1501  onenand_get_device(mtd, FL_READING);
1502  if (ops->datbuf)
1503  ret = ONENAND_IS_4KB_PAGE(this) ?
1504  onenand_mlc_read_ops_nolock(mtd, from, ops) :
1505  onenand_read_ops_nolock(mtd, from, ops);
1506  else
1507  ret = onenand_read_oob_nolock(mtd, from, ops);
1508  onenand_release_device(mtd);
1509 
1510  return ret;
1511 }
1512 
1520 static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1521 {
1522  struct onenand_chip *this = mtd->priv;
1523  unsigned long timeout;
1524  unsigned int interrupt, ctrl, ecc, addr1, addr8;
1525 
1526  /* The 20 msec is enough */
1527  timeout = jiffies + msecs_to_jiffies(20);
1528  while (time_before(jiffies, timeout)) {
1529  interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
1530  if (interrupt & ONENAND_INT_MASTER)
1531  break;
1532  }
1533  /* To get correct interrupt status in timeout case */
1534  interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
1535  ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
1536  addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
1537  addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
1538 
1539  if (interrupt & ONENAND_INT_READ) {
1540  ecc = onenand_read_ecc(this);
1541  if (ecc & ONENAND_ECC_2BIT_ALL) {
1542  printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
1543  "intr 0x%04x addr1 %#x addr8 %#x\n",
1544  __func__, ecc, ctrl, interrupt, addr1, addr8);
1545  return ONENAND_BBT_READ_ECC_ERROR;
1546  }
1547  } else {
1548  printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
1549  "intr 0x%04x addr1 %#x addr8 %#x\n",
1550  __func__, ctrl, interrupt, addr1, addr8);
1551  return ONENAND_BBT_READ_FATAL_ERROR;
1552  }
1553 
1554  /* Initial bad block case: 0x2400 or 0x0400 */
1555  if (ctrl & ONENAND_CTRL_ERROR) {
1556  printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
1557  "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
1558  return ONENAND_BBT_READ_ERROR;
1559  }
1560 
1561  return 0;
1562 }
1563 
1572 int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1573  struct mtd_oob_ops *ops)
1574 {
1575  struct onenand_chip *this = mtd->priv;
1576  int read = 0, thislen, column;
1577  int ret = 0, readcmd;
1578  size_t len = ops->ooblen;
1579  u_char *buf = ops->oobbuf;
1580 
1581  pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
1582  len);
1583 
1584  /* Initialize return value */
1585  ops->oobretlen = 0;
1586 
1587  /* Do not allow reads past end of device */
1588  if (unlikely((from + len) > mtd->size)) {
1589  printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1590  __func__);
1591  return ONENAND_BBT_READ_FATAL_ERROR;
1592  }
1593 
1594  /* Grab the lock and see if the device is available */
1595  onenand_get_device(mtd, FL_READING);
1596 
1597  column = from & (mtd->oobsize - 1);
1598 
1599  readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1600
1601  while (read < len) {
1602  cond_resched();
1603 
1604  thislen = mtd->oobsize - column;
1605  thislen = min_t(int, thislen, len);
1606 
1607  this->command(mtd, readcmd, from, mtd->oobsize);
1608 
1609  onenand_update_bufferram(mtd, from, 0);
1610 
1611  ret = this->bbt_wait(mtd, FL_READING);
1612  if (unlikely(ret))
1613  ret = onenand_recover_lsb(mtd, from, ret);
1614 
1615  if (ret)
1616  break;
1617 
1618  this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
1619  read += thislen;
1620  if (read == len)
1621  break;
1622 
1623  buf += thislen;
1624 
1625  /* Read more? */
1626  if (read < len) {
1627  /* Update Page size */
1628  from += this->writesize;
1629  column = 0;
1630  }
1631  }
1632 
1633  /* Deselect and wake up anyone waiting on the device */
1634  onenand_release_device(mtd);
1635 
1636  ops->oobretlen = read;
1637  return ret;
1638 }
1639 
1640 #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
1641 
1647 static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
1648 {
1649  struct onenand_chip *this = mtd->priv;
1650  u_char *oob_buf = this->oob_buf;
1651  int status, i, readcmd;
1652 
1653  readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1654
1655  this->command(mtd, readcmd, to, mtd->oobsize);
1656  onenand_update_bufferram(mtd, to, 0);
1657  status = this->wait(mtd, FL_READING);
1658  if (status)
1659  return status;
1660 
1661  this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
1662  for (i = 0; i < mtd->oobsize; i++)
1663  if (buf[i] != 0xFF && buf[i] != oob_buf[i])
1664  return -EBADMSG;
1665 
1666  return 0;
1667 }
1668 
1676 static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
1677 {
1678  struct onenand_chip *this = mtd->priv;
1679  int ret = 0;
1680  int thislen, column;
1681 
1682  column = addr & (this->writesize - 1);
1683 
1684  while (len != 0) {
1685  thislen = min_t(int, this->writesize - column, len);
1686 
1687  this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
1688 
1689  onenand_update_bufferram(mtd, addr, 0);
1690 
1691  ret = this->wait(mtd, FL_READING);
1692  if (ret)
1693  return ret;
1694 
1695  onenand_update_bufferram(mtd, addr, 1);
1696 
1697  this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
1698 
1699  if (memcmp(buf, this->verify_buf + column, thislen))
1700  return -EBADMSG;
1701 
1702  len -= thislen;
1703  buf += thislen;
1704  addr += thislen;
1705  column = 0;
1706  }
1707 
1708  return 0;
1709 }
1710 #else
1711 #define onenand_verify(...) (0)
1712 #define onenand_verify_oob(...) (0)
1713 #endif
1714 
1715 #define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
1716 
1717 static void onenand_panic_wait(struct mtd_info *mtd)
1718 {
1719  struct onenand_chip *this = mtd->priv;
1720  unsigned int interrupt;
1721  int i;
1722 
1723  for (i = 0; i < 2000; i++) {
1724  interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
1725  if (interrupt & ONENAND_INT_MASTER)
1726  break;
1727  udelay(10);
1728  }
1729 }
1730 
1741 static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1742  size_t *retlen, const u_char *buf)
1743 {
1744  struct onenand_chip *this = mtd->priv;
1745  int column, subpage;
1746  int written = 0;
1747  int ret = 0;
1748 
1749  if (this->state == FL_PM_SUSPENDED)
1750  return -EBUSY;
1751 
1752  /* Wait for any existing operation to clear */
1753  onenand_panic_wait(mtd);
1754 
1755  pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
1756  (int)len);
1757 
1758  /* Reject writes, which are not page aligned */
1759  if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1760  printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
1761  __func__);
1762  return -EINVAL;
1763  }
1764 
1765  column = to & (mtd->writesize - 1);
1766 
1767  /* Loop until all data write */
1768  while (written < len) {
1769  int thislen = min_t(int, mtd->writesize - column, len - written);
1770  u_char *wbuf = (u_char *) buf;
1771 
1772  this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
1773 
1774  /* Partial page write */
1775  subpage = thislen < mtd->writesize;
1776  if (subpage) {
1777  memset(this->page_buf, 0xff, mtd->writesize);
1778  memcpy(this->page_buf + column, buf, thislen);
1779  wbuf = this->page_buf;
1780  }
1781 
1782  this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
1783  this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
1784 
1785  this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
1786 
1787  onenand_panic_wait(mtd);
1788 
1789  /* In partial page write we don't update bufferram */
1790  onenand_update_bufferram(mtd, to, !ret && !subpage);
1791  if (ONENAND_IS_2PLANE(this)) {
1792  ONENAND_SET_BUFFERRAM1(this);
1793  onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
1794  }
1795 
1796  if (ret) {
1797  printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
1798  break;
1799  }
1800 
1801  written += thislen;
1802 
1803  if (written == len)
1804  break;
1805 
1806  column = 0;
1807  to += thislen;
1808  buf += thislen;
1809  }
1810 
1811  *retlen = written;
1812  return ret;
1813 }
1814 
1823 static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
1824  const u_char *buf, int column, int thislen)
1825 {
1826  struct onenand_chip *this = mtd->priv;
1827  struct nand_oobfree *free;
1828  int writecol = column;
1829  int writeend = column + thislen;
1830  int lastgap = 0;
1831  unsigned int i;
1832 
1833  free = this->ecclayout->oobfree;
1834  for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1835  if (writecol >= lastgap)
1836  writecol += free->offset - lastgap;
1837  if (writeend >= lastgap)
1838  writeend += free->offset - lastgap;
1839  lastgap = free->offset + free->length;
1840  }
1841  free = this->ecclayout->oobfree;
1842  for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1843  int free_end = free->offset + free->length;
1844  if (free->offset < writeend && free_end > writecol) {
1845  int st = max_t(int,free->offset,writecol);
1846  int ed = min_t(int,free_end,writeend);
1847  int n = ed - st;
1848  memcpy(oob_buf + st, buf, n);
1849  buf += n;
1850  } else if (column == 0)
1851  break;
1852  }
1853  return 0;
1854 }
1855 
1864 static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1865  struct mtd_oob_ops *ops)
1866 {
1867  struct onenand_chip *this = mtd->priv;
1868  int written = 0, column, thislen = 0, subpage = 0;
1869  int prev = 0, prevlen = 0, prev_subpage = 0, first = 1;
1870  int oobwritten = 0, oobcolumn, thisooblen, oobsize;
1871  size_t len = ops->len;
1872  size_t ooblen = ops->ooblen;
1873  const u_char *buf = ops->datbuf;
1874  const u_char *oob = ops->oobbuf;
1875  u_char *oobbuf;
1876  int ret = 0, cmd;
1877 
1878  pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
1879  (int)len);
1880 
1881  /* Initialize retlen, in case of early exit */
1882  ops->retlen = 0;
1883  ops->oobretlen = 0;
1884 
1885  /* Reject writes, which are not page aligned */
1886  if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1887  printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
1888  __func__);
1889  return -EINVAL;
1890  }
1891 
1892  /* Check zero length */
1893  if (!len)
1894  return 0;
1895 
1896  if (ops->mode == MTD_OPS_AUTO_OOB)
1897  oobsize = this->ecclayout->oobavail;
1898  else
1899  oobsize = mtd->oobsize;
1900 
1901  oobcolumn = to & (mtd->oobsize - 1);
1902 
1903  column = to & (mtd->writesize - 1);
1904 
1905  /* Loop until all data write */
1906  while (1) {
1907  if (written < len) {
1908  u_char *wbuf = (u_char *) buf;
1909 
1910  thislen = min_t(int, mtd->writesize - column, len - written);
1911  thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
1912 
1913  cond_resched();
1914 
1915  this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
1916 
1917  /* Partial page write */
1918  subpage = thislen < mtd->writesize;
1919  if (subpage) {
1920  memset(this->page_buf, 0xff, mtd->writesize);
1921  memcpy(this->page_buf + column, buf, thislen);
1922  wbuf = this->page_buf;
1923  }
1924 
1925  this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
1926 
1927  if (oob) {
1928  oobbuf = this->oob_buf;
1929 
1930  /* We send data to spare ram with oobsize
1931  * to prevent byte access */
1932  memset(oobbuf, 0xff, mtd->oobsize);
1933  if (ops->mode == MTD_OPS_AUTO_OOB)
1934  onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
1935  else
1936  memcpy(oobbuf + oobcolumn, oob, thisooblen);
1937 
1938  oobwritten += thisooblen;
1939  oob += thisooblen;
1940  oobcolumn = 0;
1941  } else
1942  oobbuf = (u_char *) ffchars;
1943 
1944  this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
1945  } else
1946  ONENAND_SET_PREV_BUFFERRAM(this);
1947
1948  /*
1949  * 2 PLANE, MLC, and Flex-OneNAND do not support
1950  * write-while-program feature.
1951  */
1952  if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
1953  ONENAND_SET_PREV_BUFFERRAM(this);
1954
1955  ret = this->wait(mtd, FL_WRITING);
1956 
1957  /* In partial page write we don't update bufferram */
1958  onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
1959  if (ret) {
1960  written -= prevlen;
1961  printk(KERN_ERR "%s: write failed %d\n",
1962  __func__, ret);
1963  break;
1964  }
1965 
1966  if (written == len) {
1967  /* Only check verify write turn on */
1968  ret = onenand_verify(mtd, buf - len, to - len, len);
1969  if (ret)
1970  printk(KERN_ERR "%s: verify failed %d\n",
1971  __func__, ret);
1972  break;
1973  }
1974 
1975  ONENAND_SET_NEXT_BUFFERRAM(this);
1976  }
1977 
1978  this->ongoing = 0;
1979  cmd = ONENAND_CMD_PROG;
1980 
1981  /* Exclude 1st OTP and OTP blocks for cache program feature */
1982  if (ONENAND_IS_CACHE_PROGRAM(this) &&
1983  likely(onenand_block(this, to) != 0) &&
1984  ONENAND_IS_4KB_PAGE(this) &&
1985  ((written + thislen) < len)) {
1986  cmd = ONENAND_CMD_2X_CACHE_PROG;
1987  this->ongoing = 1;
1988  }
1989 
1990  this->command(mtd, cmd, to, mtd->writesize);
1991 
1992  /*
1993  * 2 PLANE, MLC, and Flex-OneNAND wait here
1994  */
1995  if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
1996  ret = this->wait(mtd, FL_WRITING);
1997 
1998  /* In partial page write we don't update bufferram */
1999  onenand_update_bufferram(mtd, to, !ret && !subpage);
2000  if (ret) {
2001  printk(KERN_ERR "%s: write failed %d\n",
2002  __func__, ret);
2003  break;
2004  }
2005 
2006  /* Only check verify write turn on */
2007  ret = onenand_verify(mtd, buf, to, thislen);
2008  if (ret) {
2009  printk(KERN_ERR "%s: verify failed %d\n",
2010  __func__, ret);
2011  break;
2012  }
2013 
2014  written += thislen;
2015 
2016  if (written == len)
2017  break;
2018 
2019  } else
2020  written += thislen;
2021 
2022  column = 0;
2023  prev_subpage = subpage;
2024  prev = to;
2025  prevlen = thislen;
2026  to += thislen;
2027  buf += thislen;
2028  first = 0;
2029  }
2030 
2031  /* In error case, clear all bufferrams */
2032  if (written != len)
2033  onenand_invalidate_bufferram(mtd, 0, -1);
2034 
2035  ops->retlen = written;
2036  ops->oobretlen = oobwritten;
2037 
2038  return ret;
2039 }
2040 
2041 
2053 static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2054  struct mtd_oob_ops *ops)
2055 {
2056  struct onenand_chip *this = mtd->priv;
2057  int column, ret = 0, oobsize;
2058  int written = 0, oobcmd;
2059  u_char *oobbuf;
2060  size_t len = ops->ooblen;
2061  const u_char *buf = ops->oobbuf;
2062  unsigned int mode = ops->mode;
2063 
2064  to += ops->ooboffs;
2065 
2066  pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
2067  (int)len);
2068 
2069  /* Initialize retlen, in case of early exit */
2070  ops->oobretlen = 0;
2071 
2072  if (mode == MTD_OPS_AUTO_OOB)
2073  oobsize = this->ecclayout->oobavail;
2074  else
2075  oobsize = mtd->oobsize;
2076 
2077  column = to & (mtd->oobsize - 1);
2078 
2079  if (unlikely(column >= oobsize)) {
2080  printk(KERN_ERR "%s: Attempted to start write outside oob\n",
2081  __func__);
2082  return -EINVAL;
2083  }
2084 
2085  /* For compatibility with NAND: Do not allow write past end of page */
2086  if (unlikely(column + len > oobsize)) {
2087  printk(KERN_ERR "%s: Attempt to write past end of page\n",
2088  __func__);
2089  return -EINVAL;
2090  }
2091 
2092  /* Do not allow reads past end of device */
2093  if (unlikely(to >= mtd->size ||
2094  column + len > ((mtd->size >> this->page_shift) -
2095  (to >> this->page_shift)) * oobsize)) {
2096  printk(KERN_ERR "%s: Attempted to write past end of device\n",
2097  __func__);
2098  return -EINVAL;
2099  }
2100 
2101  oobbuf = this->oob_buf;
2102 
2103  oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
2104
2105  /* Loop until all data write */
2106  while (written < len) {
2107  int thislen = min_t(int, oobsize, len - written);
2108 
2109  cond_resched();
2110 
2111  this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
2112 
2113  /* We send data to spare ram with oobsize
2114  * to prevent byte access */
2115  memset(oobbuf, 0xff, mtd->oobsize);
2116  if (mode == MTD_OPS_AUTO_OOB)
2117  onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
2118  else
2119  memcpy(oobbuf + column, buf, thislen);
2120  this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
2121 
2122  if (ONENAND_IS_4KB_PAGE(this)) {
2123  /* Set main area of DataRAM to 0xff*/
2124  memset(this->page_buf, 0xff, mtd->writesize);
2125  this->write_bufferram(mtd, ONENAND_DATARAM,
2126  this->page_buf, 0, mtd->writesize);
2127  }
2128 
2129  this->command(mtd, oobcmd, to, mtd->oobsize);
2130 
2131  onenand_update_bufferram(mtd, to, 0);
2132  if (ONENAND_IS_2PLANE(this)) {
2133  ONENAND_SET_BUFFERRAM1(this);
2134  onenand_update_bufferram(mtd, to + this->writesize, 0);
2135  }
2136 
2137  ret = this->wait(mtd, FL_WRITING);
2138  if (ret) {
2139  printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
2140  break;
2141  }
2142 
2143  ret = onenand_verify_oob(mtd, oobbuf, to);
2144  if (ret) {
2145  printk(KERN_ERR "%s: verify failed %d\n",
2146  __func__, ret);
2147  break;
2148  }
2149 
2150  written += thislen;
2151  if (written == len)
2152  break;
2153 
2154  to += mtd->writesize;
2155  buf += thislen;
2156  column = 0;
2157  }
2158 
2159  ops->oobretlen = written;
2160 
2161  return ret;
2162 }
2163 
2174 static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
2175  size_t *retlen, const u_char *buf)
2176 {
2177  struct mtd_oob_ops ops = {
2178  .len = len,
2179  .ooblen = 0,
2180  .datbuf = (u_char *) buf,
2181  .oobbuf = NULL,
2182  };
2183  int ret;
2184 
2185  onenand_get_device(mtd, FL_WRITING);
2186  ret = onenand_write_ops_nolock(mtd, to, &ops);
2187  onenand_release_device(mtd);
2188 
2189  *retlen = ops.retlen;
2190  return ret;
2191 }
2192 
2199 static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
2200  struct mtd_oob_ops *ops)
2201 {
2202  int ret;
2203 
2204  switch (ops->mode) {
2205  case MTD_OPS_PLACE_OOB:
2206  case MTD_OPS_AUTO_OOB:
2207  break;
2208  case MTD_OPS_RAW:
2209  /* Not implemented yet */
2210  default:
2211  return -EINVAL;
2212  }
2213 
2214  onenand_get_device(mtd, FL_WRITING);
2215  if (ops->datbuf)
2216  ret = onenand_write_ops_nolock(mtd, to, ops);
2217  else
2218  ret = onenand_write_oob_nolock(mtd, to, ops);
2219  onenand_release_device(mtd);
2220 
2221  return ret;
2222 }
2223 
2233 static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
2234 {
2235  struct onenand_chip *this = mtd->priv;
2236  struct bbm_info *bbm = this->bbm;
2237 
2238  /* Return info from the table */
2239  return bbm->isbad_bbt(mtd, ofs, allowbbt);
2240 }
2241 
2242 
2243 static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
2244  struct erase_info *instr)
2245 {
2246  struct onenand_chip *this = mtd->priv;
2247  loff_t addr = instr->addr;
2248  int len = instr->len;
2249  unsigned int block_size = (1 << this->erase_shift);
2250  int ret = 0;
2251 
2252  while (len) {
2253  this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
2254  ret = this->wait(mtd, FL_VERIFYING_ERASE);
2255  if (ret) {
2256  printk(KERN_ERR "%s: Failed verify, block %d\n",
2257  __func__, onenand_block(this, addr));
2258  instr->state = MTD_ERASE_FAILED;
2259  instr->fail_addr = addr;
2260  return -1;
2261  }
2262  len -= block_size;
2263  addr += block_size;
2264  }
2265  return 0;
2266 }
2267 
2276 static int onenand_multiblock_erase(struct mtd_info *mtd,
2277  struct erase_info *instr,
2278  unsigned int block_size)
2279 {
2280  struct onenand_chip *this = mtd->priv;
2281  loff_t addr = instr->addr;
2282  int len = instr->len;
2283  int eb_count = 0;
2284  int ret = 0;
2285  int bdry_block = 0;
2286 
2287  instr->state = MTD_ERASING;
2288 
2289  if (ONENAND_IS_DDP(this)) {
2290  loff_t bdry_addr = this->chipsize >> 1;
2291  if (addr < bdry_addr && (addr + len) > bdry_addr)
2292  bdry_block = bdry_addr >> this->erase_shift;
2293  }
2294 
2295  /* Pre-check bbs */
2296  while (len) {
2297  /* Check if we have a bad block, we do not erase bad blocks */
2298  if (onenand_block_isbad_nolock(mtd, addr, 0)) {
2299  printk(KERN_WARNING "%s: attempt to erase a bad block "
2300  "at addr 0x%012llx\n",
2301  __func__, (unsigned long long) addr);
2302  instr->state = MTD_ERASE_FAILED;
2303  return -EIO;
2304  }
2305  len -= block_size;
2306  addr += block_size;
2307  }
2308 
2309  len = instr->len;
2310  addr = instr->addr;
2311 
2312  /* loop over 64 eb batches */
2313  while (len) {
2314  struct erase_info verify_instr = *instr;
2315  int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
2316 
2317  verify_instr.addr = addr;
2318  verify_instr.len = 0;
2319 
2320  /* do not cross chip boundary */
2321  if (bdry_block) {
2322  int this_block = (addr >> this->erase_shift);
2323 
2324  if (this_block < bdry_block) {
2325  max_eb_count = min(max_eb_count,
2326  (bdry_block - this_block));
2327  }
2328  }
2329 
2330  eb_count = 0;
2331 
2332  while (len > block_size && eb_count < (max_eb_count - 1)) {
2333  this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
2334  addr, block_size);
2335  onenand_invalidate_bufferram(mtd, addr, block_size);
2336 
2337  ret = this->wait(mtd, FL_PREPARING_ERASE);
2338  if (ret) {
2339  printk(KERN_ERR "%s: Failed multiblock erase, "
2340  "block %d\n", __func__,
2341  onenand_block(this, addr));
2342  instr->state = MTD_ERASE_FAILED;
2343  instr->fail_addr = addr;
2344  return -EIO;
2345  }
2346 
2347  len -= block_size;
2348  addr += block_size;
2349  eb_count++;
2350  }
2351 
2352  /* last block of 64-eb series */
2353  cond_resched();
2354  this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
2355  onenand_invalidate_bufferram(mtd, addr, block_size);
2356 
2357  ret = this->wait(mtd, FL_ERASING);
2358  /* Check if it is write protected */
2359  if (ret) {
2360  printk(KERN_ERR "%s: Failed erase, block %d\n",
2361  __func__, onenand_block(this, addr));
2362  instr->state = MTD_ERASE_FAILED;
2363  instr->fail_addr = addr;
2364  return -EIO;
2365  }
2366 
2367  len -= block_size;
2368  addr += block_size;
2369  eb_count++;
2370 
2371  /* verify */
2372  verify_instr.len = eb_count * block_size;
2373  if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
2374  instr->state = verify_instr.state;
2375  instr->fail_addr = verify_instr.fail_addr;
2376  return -EIO;
2377  }
2378 
2379  }
2380  return 0;
2381 }
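/*
 * Illustrative sketch (not part of the driver): the batching policy used
 * by onenand_multiblock_erase() above.  Each pass queues at most
 * MB_ERASE_MAX_BLK_COUNT (64) blocks -- up to 63 with the multiblock
 * command plus one final conventional erase -- and a DDP chip boundary
 * further shortens a batch so it never crosses dies.  The helper below
 * only reproduces the batch sizing for readability; it is not used by
 * the driver.
 */
static void example_show_mb_batches(unsigned int nr_blocks)
{
    while (nr_blocks) {
        unsigned int batch = min_t(unsigned int, nr_blocks,
                                   MB_ERASE_MAX_BLK_COUNT);

        pr_info("batch of %u block(s): %u multiblock + 1 final erase\n",
                batch, batch - 1);
        nr_blocks -= batch;
    }
}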
2382 
2383 
2393 static int onenand_block_by_block_erase(struct mtd_info *mtd,
2394  struct erase_info *instr,
2395  struct mtd_erase_region_info *region,
2396  unsigned int block_size)
2397 {
2398  struct onenand_chip *this = mtd->priv;
2399  loff_t addr = instr->addr;
2400  int len = instr->len;
2401  loff_t region_end = 0;
2402  int ret = 0;
2403 
2404  if (region) {
2405  /* region is set for Flex-OneNAND */
2406  region_end = region->offset + region->erasesize * region->numblocks;
2407  }
2408 
2409  instr->state = MTD_ERASING;
2410 
2411  /* Loop through the blocks */
2412  while (len) {
2413  cond_resched();
2414 
2415  /* Check if we have a bad block, we do not erase bad blocks */
2416  if (onenand_block_isbad_nolock(mtd, addr, 0)) {
2417  printk(KERN_WARNING "%s: attempt to erase a bad block "
2418  "at addr 0x%012llx\n",
2419  __func__, (unsigned long long) addr);
2420  instr->state = MTD_ERASE_FAILED;
2421  return -EIO;
2422  }
2423 
2424  this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
2425 
2426  onenand_invalidate_bufferram(mtd, addr, block_size);
2427 
2428  ret = this->wait(mtd, FL_ERASING);
2429  /* Check, if it is write protected */
2430  if (ret) {
2431  printk(KERN_ERR "%s: Failed erase, block %d\n",
2432  __func__, onenand_block(this, addr));
2433  instr->state = MTD_ERASE_FAILED;
2434  instr->fail_addr = addr;
2435  return -EIO;
2436  }
2437 
2438  len -= block_size;
2439  addr += block_size;
2440 
2441  if (region && addr == region_end) {
2442  if (!len)
2443  break;
2444  region++;
2445 
2446  block_size = region->erasesize;
2447  region_end = region->offset + region->erasesize * region->numblocks;
2448 
2449  if (len & (block_size - 1)) {
2450  /* FIXME: This should be handled at MTD partitioning level. */
2451  printk(KERN_ERR "%s: Unaligned address\n",
2452  __func__);
2453  return -EIO;
2454  }
2455  }
2456  }
2457  return 0;
2458 }
2459 
2467 static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2468 {
2469  struct onenand_chip *this = mtd->priv;
2470  unsigned int block_size;
2471  loff_t addr = instr->addr;
2472  loff_t len = instr->len;
2473  int ret = 0;
2474  struct mtd_erase_region_info *region = NULL;
2475  loff_t region_offset = 0;
2476 
2477  pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
2478  (unsigned long long)instr->addr,
2479  (unsigned long long)instr->len);
2480 
2481  if (FLEXONENAND(this)) {
2482  /* Find the eraseregion of this address */
2483  int i = flexonenand_region(mtd, addr);
2484 
2485  region = &mtd->eraseregions[i];
2486  block_size = region->erasesize;
2487 
2488  /* Start address within region must align on block boundary.
2489  * Erase region's start offset is always block start address.
2490  */
2491  region_offset = region->offset;
2492  } else
2493  block_size = 1 << this->erase_shift;
2494 
2495  /* Start address must align on block boundary */
2496  if (unlikely((addr - region_offset) & (block_size - 1))) {
2497  printk(KERN_ERR "%s: Unaligned address\n", __func__);
2498  return -EINVAL;
2499  }
2500 
2501  /* Length must align on block boundary */
2502  if (unlikely(len & (block_size - 1))) {
2503  printk(KERN_ERR "%s: Length not block aligned\n", __func__);
2504  return -EINVAL;
2505  }
2506 
2507  /* Grab the lock and see if the device is available */
2508  onenand_get_device(mtd, FL_ERASING);
2509 
2510  if (ONENAND_IS_4KB_PAGE(this) || region ||
2511  instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
2512  /* region is set for Flex-OneNAND (no mb erase) */
2513  ret = onenand_block_by_block_erase(mtd, instr,
2514  region, block_size);
2515  } else {
2516  ret = onenand_multiblock_erase(mtd, instr, block_size);
2517  }
2518 
2519  /* Deselect and wake up anyone waiting on the device */
2520  onenand_release_device(mtd);
2521 
2522  /* Do call back function */
2523  if (!ret) {
2524  instr->state = MTD_ERASE_DONE;
2525  mtd_erase_callback(instr);
2526  }
2527 
2528  return ret;
2529 }
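/*
 * Illustrative sketch (not part of the driver): erasing a single block
 * through the generic mtd_erase() wrapper, which ends up in
 * onenand_erase() above.  onenand_erase() completes synchronously and
 * invokes the callback before returning, so checking instr.state after
 * the call is sufficient here; clients of asynchronous drivers would
 * instead wait on a completion signalled from instr.callback.
 */
static int example_erase_block(struct mtd_info *mtd, loff_t ofs)
{
    struct erase_info instr = {
        .mtd  = mtd,
        .addr = ofs,            /* must be eraseblock aligned */
        .len  = mtd->erasesize, /* exactly one eraseblock */
    };
    int ret = mtd_erase(mtd, &instr);

    if (ret)
        return ret;
    return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
}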
2530 
2537 static void onenand_sync(struct mtd_info *mtd)
2538 {
2539  pr_debug("%s: called\n", __func__);
2540 
2541  /* Grab the lock and see if the device is available */
2542  onenand_get_device(mtd, FL_SYNCING);
2543 
2544  /* Release it and go back */
2545  onenand_release_device(mtd);
2546 }
2547 
2555 static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
2556 {
2557  int ret;
2558 
2559  /* Check for invalid offset */
2560  if (ofs > mtd->size)
2561  return -EINVAL;
2562 
2563  onenand_get_device(mtd, FL_READING);
2564  ret = onenand_block_isbad_nolock(mtd, ofs, 0);
2565  onenand_release_device(mtd);
2566  return ret;
2567 }
2568 
2577 static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
2578 {
2579  struct onenand_chip *this = mtd->priv;
2580  struct bbm_info *bbm = this->bbm;
2581  u_char buf[2] = {0, 0};
2582  struct mtd_oob_ops ops = {
2583  .mode = MTD_OPS_PLACE_OOB,
2584  .ooblen = 2,
2585  .oobbuf = buf,
2586  .ooboffs = 0,
2587  };
2588  int block;
2589 
2590  /* Get block number */
2591  block = onenand_block(this, ofs);
2592  if (bbm->bbt)
2593  bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
2594 
2595  /* We write two bytes, so we don't have to mess with 16-bit access */
2596  ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
2597  /* FIXME : What to do when marking SLC block in partition
2598  * with MLC erasesize? For now, it is not advisable to
2599  * create partitions containing both SLC and MLC regions.
2600  */
2601  return onenand_write_oob_nolock(mtd, ofs, &ops);
2602 }
2603 
2611 static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2612 {
2613  int ret;
2614 
2615  ret = onenand_block_isbad(mtd, ofs);
2616  if (ret) {
2617  /* If it was bad already, return success and do nothing */
2618  if (ret > 0)
2619  return 0;
2620  return ret;
2621  }
2622 
2623  onenand_get_device(mtd, FL_WRITING);
2624  ret = mtd_block_markbad(mtd, ofs);
2625  onenand_release_device(mtd);
2626  return ret;
2627 }
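/*
 * Illustrative sketch (not part of the driver): the usual client pattern
 * around the two helpers above -- skip blocks that mtd_block_isbad()
 * reports as bad, and mark a block bad when programming it fails with
 * -EIO.  Error handling is trimmed for brevity; a negative return from
 * mtd_block_isbad() (e.g. past the end of the device) ends the skip loop.
 */
static int example_write_skipping_bad(struct mtd_info *mtd, loff_t ofs,
                                      const u_char *buf, size_t len)
{
    size_t retlen;
    int ret;

    while (mtd_block_isbad(mtd, ofs) > 0)
        ofs += mtd->erasesize;          /* skip factory/grown bad blocks */

    ret = mtd_write(mtd, ofs, len, &retlen, buf);
    if (ret == -EIO)
        mtd_block_markbad(mtd, ofs);    /* remember the new bad block */

    return ret;
}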
2628 
2638 static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
2639 {
2640  struct onenand_chip *this = mtd->priv;
2641  int start, end, block, value, status;
2642  int wp_status_mask;
2643 
2644  start = onenand_block(this, ofs);
2645  end = onenand_block(this, ofs + len) - 1;
2646 
2647  if (cmd == ONENAND_CMD_LOCK)
2648  wp_status_mask = ONENAND_WP_LS;
2649  else
2650  wp_status_mask = ONENAND_WP_US;
2651 
2652  /* Continuous lock scheme */
2653  if (this->options & ONENAND_HAS_CONT_LOCK) {
2654  /* Set start block address */
2655  this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
2656  /* Set end block address */
2657  this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
2658  /* Write lock command */
2659  this->command(mtd, cmd, 0, 0);
2660 
2661  /* There's no return value */
2662  this->wait(mtd, FL_LOCKING);
2663 
2664  /* Sanity check */
2665  while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
2666  & ONENAND_CTRL_ONGO)
2667  continue;
2668 
2669  /* Check lock status */
2670  status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2671  if (!(status & wp_status_mask))
2672  printk(KERN_ERR "%s: wp status = 0x%x\n",
2673  __func__, status);
2674 
2675  return 0;
2676  }
2677 
2678  /* Block lock scheme */
2679  for (block = start; block < end + 1; block++) {
2680  /* Set block address */
2681  value = onenand_block_address(this, block);
2682  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
2683  /* Select DataRAM for DDP */
2684  value = onenand_bufferram_address(this, block);
2685  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
2686  /* Set start block address */
2687  this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
2688  /* Write lock command */
2689  this->command(mtd, cmd, 0, 0);
2690 
2691  /* There's no return value */
2692  this->wait(mtd, FL_LOCKING);
2693 
2694  /* Sanity check */
2695  while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
2696  & ONENAND_CTRL_ONGO)
2697  continue;
2698 
2699  /* Check lock status */
2700  status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2701  if (!(status & wp_status_mask))
2702  printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
2703  __func__, block, status);
2704  }
2705 
2706  return 0;
2707 }
2708 
2717 static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2718 {
2719  int ret;
2720 
2721  onenand_get_device(mtd, FL_LOCKING);
2722  ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
2723  onenand_release_device(mtd);
2724  return ret;
2725 }
2726 
2735 static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2736 {
2737  int ret;
2738 
2739  onenand_get_device(mtd, FL_LOCKING);
2740  ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
2741  onenand_release_device(mtd);
2742  return ret;
2743 }
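/*
 * Illustrative sketch (not part of the driver): OneNAND blocks power up
 * write-protected, so a range has to be unlocked before it can be erased
 * or programmed (onenand_scan() below normally unlocks the whole device
 * unless ONENAND_SKIP_INITIAL_UNLOCKING is set).  mtd_unlock()/mtd_lock()
 * dispatch to onenand_unlock()/onenand_lock() above.
 */
static int example_unlock_modify_relock(struct mtd_info *mtd, loff_t ofs,
                                        uint64_t len)
{
    int ret = mtd_unlock(mtd, ofs, len);    /* clear write protection */

    if (ret)
        return ret;

    /* ... erase/program the range here ... */

    return mtd_lock(mtd, ofs, len);         /* restore write protection */
}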
2744 
2751 static int onenand_check_lock_status(struct onenand_chip *this)
2752 {
2753  unsigned int value, block, status;
2754  unsigned int end;
2755 
2756  end = this->chipsize >> this->erase_shift;
2757  for (block = 0; block < end; block++) {
2758  /* Set block address */
2759  value = onenand_block_address(this, block);
2760  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
2761  /* Select DataRAM for DDP */
2762  value = onenand_bufferram_address(this, block);
2763  this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
2764  /* Set start block address */
2765  this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
2766 
2767  /* Check lock status */
2768  status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2769  if (!(status & ONENAND_WP_US)) {
2770  printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
2771  __func__, block, status);
2772  return 0;
2773  }
2774  }
2775 
2776  return 1;
2777 }
2778 
2785 static void onenand_unlock_all(struct mtd_info *mtd)
2786 {
2787  struct onenand_chip *this = mtd->priv;
2788  loff_t ofs = 0;
2789  loff_t len = mtd->size;
2790 
2791  if (this->options & ONENAND_HAS_UNLOCK_ALL) {
2792  /* Set start block address */
2793  this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
2794  /* Write unlock command */
2795  this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
2796 
2797  /* There's no return value */
2798  this->wait(mtd, FL_LOCKING);
2799 
2800  /* Sanity check */
2801  while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
2802  & ONENAND_CTRL_ONGO)
2803  continue;
2804 
2805  /* Don't check lock status */
2806  if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
2807  return;
2808 
2809  /* Check lock status */
2810  if (onenand_check_lock_status(this))
2811  return;
2812 
2813  /* Workaround for all block unlock in DDP */
2814  if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
2815  /* All blocks on another chip */
2816  ofs = this->chipsize >> 1;
2817  len = this->chipsize >> 1;
2818  }
2819  }
2820 
2821  onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
2822 }
2823 
2824 #ifdef CONFIG_MTD_ONENAND_OTP
2825 
2833 static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
2834  size_t len)
2835 {
2836  struct onenand_chip *this = mtd->priv;
2837  int value, block, page;
2838 
2839  /* Address translation */
2840  switch (cmd) {
2841  case ONENAND_CMD_OTP_ACCESS:
2842  block = (int) (addr >> this->erase_shift);
2843  page = -1;
2844  break;
2845 
2846  default:
2847  block = (int) (addr >> this->erase_shift);
2848  page = (int) (addr >> this->page_shift);
2849 
2850  if (ONENAND_IS_2PLANE(this)) {
2851  /* Make the even block number */
2852  block &= ~1;
2853  /* Is it the odd plane? */
2854  if (addr & this->writesize)
2855  block++;
2856  page >>= 1;
2857  }
2858  page &= this->page_mask;
2859  break;
2860  }
2861 
2862  if (block != -1) {
2863  /* Write 'DFS, FBA' of Flash */
2864  value = onenand_block_address(this, block);
2865  this->write_word(value, this->base +
2866  ONENAND_REG_START_ADDRESS1);
2867  }
2868 
2869  if (page != -1) {
2870  /* Now we use page size operation */
2871  int sectors = 4, count = 4;
2872  int dataram;
2873 
2874  switch (cmd) {
2875  default:
2876  if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
2877  cmd = ONENAND_CMD_2X_PROG;
2878  dataram = ONENAND_CURRENT_BUFFERRAM(this);
2879  break;
2880  }
2881 
2882  /* Write 'FPA, FSA' of Flash */
2883  value = onenand_page_address(page, sectors);
2884  this->write_word(value, this->base +
2885  ONENAND_REG_START_ADDRESS8);
2886 
2887  /* Write 'BSA, BSC' of DataRAM */
2888  value = onenand_buffer_address(dataram, sectors, count);
2889  this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
2890  }
2891 
2892  /* Interrupt clear */
2893  this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
2894 
2895  /* Write command */
2896  this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
2897 
2898  return 0;
2899 }
2900 
2911 static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2912  struct mtd_oob_ops *ops)
2913 {
2914  struct onenand_chip *this = mtd->priv;
2915  int column, ret = 0, oobsize;
2916  int written = 0;
2917  u_char *oobbuf;
2918  size_t len = ops->ooblen;
2919  const u_char *buf = ops->oobbuf;
2920  int block, value, status;
2921 
2922  to += ops->ooboffs;
2923 
2924  /* Initialize retlen, in case of early exit */
2925  ops->oobretlen = 0;
2926 
2927  oobsize = mtd->oobsize;
2928 
2929  column = to & (mtd->oobsize - 1);
2930 
2931  oobbuf = this->oob_buf;
2932 
2933  /* Loop until all data is written */
2934  while (written < len) {
2935  int thislen = min_t(int, oobsize, len - written);
2936 
2937  cond_resched();
2938 
2939  block = (int) (to >> this->erase_shift);
2940  /*
2941  * Write 'DFS, FBA' of Flash
2942  * Add: F100h DQ=DFS, FBA
2943  */
2944 
2945  value = onenand_block_address(this, block);
2946  this->write_word(value, this->base +
2947  ONENAND_REG_START_ADDRESS1);
2948 
2949  /*
2950  * Select DataRAM for DDP
2951  * Add: F101h DQ=DBS
2952  */
2953 
2954  value = onenand_bufferram_address(this, block);
2955  this->write_word(value, this->base +
2956  ONENAND_REG_START_ADDRESS2);
2957  ONENAND_SET_NEXT_BUFFERRAM(this);
2958 
2959  /*
2960  * Enter OTP access mode
2961  */
2962  this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2963  this->wait(mtd, FL_OTPING);
2964 
2965  /* We send data to spare ram with oobsize
2966  * to prevent byte access */
2967  memcpy(oobbuf + column, buf, thislen);
2968 
2969  /*
2970  * Write Data into DataRAM
2971  * Add: 8th Word
2972  * in sector0/spare/page0
2973  * DQ=XXFCh
2974  */
2975  this->write_bufferram(mtd, ONENAND_SPARERAM,
2976  oobbuf, 0, mtd->oobsize);
2977 
2978  onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
2979  onenand_update_bufferram(mtd, to, 0);
2980  if (ONENAND_IS_2PLANE(this)) {
2981  ONENAND_SET_BUFFERRAM1(this);
2982  onenand_update_bufferram(mtd, to + this->writesize, 0);
2983  }
2984 
2985  ret = this->wait(mtd, FL_WRITING);
2986  if (ret) {
2987  printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
2988  break;
2989  }
2990 
2991  /* Exit OTP access mode */
2992  this->command(mtd, ONENAND_CMD_RESET, 0, 0);
2993  this->wait(mtd, FL_RESETING);
2994 
2995  status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
2996  status &= 0x60;
2997 
2998  if (status == 0x60) {
2999  printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
3000  printk(KERN_DEBUG "1st Block\tLOCKED\n");
3001  printk(KERN_DEBUG "OTP Block\tLOCKED\n");
3002  } else if (status == 0x20) {
3003  printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
3004  printk(KERN_DEBUG "1st Block\tLOCKED\n");
3005  printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
3006  } else if (status == 0x40) {
3007  printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
3008  printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
3009  printk(KERN_DEBUG "OTP Block\tLOCKED\n");
3010  } else {
3011  printk(KERN_DEBUG "Reboot to check\n");
3012  }
3013 
3014  written += thislen;
3015  if (written == len)
3016  break;
3017 
3018  to += mtd->writesize;
3019  buf += thislen;
3020  column = 0;
3021  }
3022 
3023  ops->oobretlen = written;
3024 
3025  return ret;
3026 }
3027 
3028 /* Internal OTP operation */
3029 typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t from, size_t len,
3030  size_t *retlen, u_char *buf);
3031 
3042 static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
3043  size_t *retlen, u_char *buf)
3044 {
3045  struct onenand_chip *this = mtd->priv;
3046  struct mtd_oob_ops ops = {
3047  .len = len,
3048  .ooblen = 0,
3049  .datbuf = buf,
3050  .oobbuf = NULL,
3051  };
3052  int ret;
3053 
3054  /* Enter OTP access mode */
3055  this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
3056  this->wait(mtd, FL_OTPING);
3057 
3058  ret = ONENAND_IS_4KB_PAGE(this) ?
3059  onenand_mlc_read_ops_nolock(mtd, from, &ops) :
3060  onenand_read_ops_nolock(mtd, from, &ops);
3061 
3062  /* Exit OTP access mode */
3063  this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3064  this->wait(mtd, FL_RESETING);
3065 
3066  return ret;
3067 }
3068 
3079 static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
3080  size_t *retlen, u_char *buf)
3081 {
3082  struct onenand_chip *this = mtd->priv;
3083  unsigned char *pbuf = buf;
3084  int ret;
3085  struct mtd_oob_ops ops;
3086 
3087  /* Force buffer page aligned */
3088  if (len < mtd->writesize) {
3089  memcpy(this->page_buf, buf, len);
3090  memset(this->page_buf + len, 0xff, mtd->writesize - len);
3091  pbuf = this->page_buf;
3092  len = mtd->writesize;
3093  }
3094 
3095  /* Enter OTP access mode */
3096  this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
3097  this->wait(mtd, FL_OTPING);
3098 
3099  ops.len = len;
3100  ops.ooblen = 0;
3101  ops.datbuf = pbuf;
3102  ops.oobbuf = NULL;
3103  ret = onenand_write_ops_nolock(mtd, to, &ops);
3104  *retlen = ops.retlen;
3105 
3106  /* Exit OTP access mode */
3107  this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3108  this->wait(mtd, FL_RESETING);
3109 
3110  return ret;
3111 }
3112 
3123 static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
3124  size_t *retlen, u_char *buf)
3125 {
3126  struct onenand_chip *this = mtd->priv;
3127  struct mtd_oob_ops ops;
3128  int ret;
3129 
3130  if (FLEXONENAND(this)) {
3131 
3132  /* Enter OTP access mode */
3133  this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
3134  this->wait(mtd, FL_OTPING);
3135  /*
3136  * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
3137  * main area of page 49.
3138  */
3139  ops.len = mtd->writesize;
3140  ops.ooblen = 0;
3141  ops.datbuf = buf;
3142  ops.oobbuf = NULL;
3143  ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
3144  *retlen = ops.retlen;
3145 
3146  /* Exit OTP access mode */
3147  this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3148  this->wait(mtd, FL_RESETING);
3149  } else {
3150  ops.mode = MTD_OPS_PLACE_OOB;
3151  ops.ooblen = len;
3152  ops.oobbuf = buf;
3153  ops.ooboffs = 0;
3154  ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
3155  *retlen = ops.oobretlen;
3156  }
3157 
3158  return ret;
3159 }
3160 
3173 static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
3174  size_t *retlen, u_char *buf,
3175  otp_op_t action, int mode)
3176 {
3177  struct onenand_chip *this = mtd->priv;
3178  int otp_pages;
3179  int density;
3180  int ret = 0;
3181 
3182  *retlen = 0;
3183 
3184  density = onenand_get_density(this->device_id);
3185  if (density < ONENAND_DEVICE_DENSITY_512Mb)
3186  otp_pages = 20;
3187  else
3188  otp_pages = 50;
3189 
3190  if (mode == MTD_OTP_FACTORY) {
3191  from += mtd->writesize * otp_pages;
3192  otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
3193  }
3194 
3195  /* Check User/Factory boundary */
3196  if (mode == MTD_OTP_USER) {
3197  if (mtd->writesize * otp_pages < from + len)
3198  return 0;
3199  } else {
3200  if (mtd->writesize * otp_pages < len)
3201  return 0;
3202  }
3203 
3204  onenand_get_device(mtd, FL_OTPING);
3205  while (len > 0 && otp_pages > 0) {
3206  if (!action) { /* OTP Info functions */
3207  struct otp_info *otpinfo;
3208 
3209  len -= sizeof(struct otp_info);
3210  if (len <= 0) {
3211  ret = -ENOSPC;
3212  break;
3213  }
3214 
3215  otpinfo = (struct otp_info *) buf;
3216  otpinfo->start = from;
3217  otpinfo->length = mtd->writesize;
3218  otpinfo->locked = 0;
3219 
3220  from += mtd->writesize;
3221  buf += sizeof(struct otp_info);
3222  *retlen += sizeof(struct otp_info);
3223  } else {
3224  size_t tmp_retlen;
3225 
3226  ret = action(mtd, from, len, &tmp_retlen, buf);
3227 
3228  buf += tmp_retlen;
3229  len -= tmp_retlen;
3230  *retlen += tmp_retlen;
3231 
3232  if (ret)
3233  break;
3234  }
3235  otp_pages--;
3236  }
3237  onenand_release_device(mtd);
3238 
3239  return ret;
3240 }
3241 
3250 static int onenand_get_fact_prot_info(struct mtd_info *mtd,
3251  struct otp_info *buf, size_t len)
3252 {
3253  size_t retlen;
3254  int ret;
3255 
3256  ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_FACTORY);
3257 
3258  return ret ? : retlen;
3259 }
3260 
3271 static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
3272  size_t len, size_t *retlen, u_char *buf)
3273 {
3274  return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_FACTORY);
3275 }
3276 
3285 static int onenand_get_user_prot_info(struct mtd_info *mtd,
3286  struct otp_info *buf, size_t len)
3287 {
3288  size_t retlen;
3289  int ret;
3290 
3291  ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_USER);
3292 
3293  return ret ? : retlen;
3294 }
3295 
3306 static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
3307  size_t len, size_t *retlen, u_char *buf)
3308 {
3309  return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_USER);
3310 }
3311 
3322 static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
3323  size_t len, size_t *retlen, u_char *buf)
3324 {
3325  return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_write, MTD_OTP_USER);
3326 }
3327 
3336 static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
3337  size_t len)
3338 {
3339  struct onenand_chip *this = mtd->priv;
3340  u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
3341  size_t retlen;
3342  int ret;
3343  unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
3344 
3345  memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
3346  : mtd->oobsize);
3347  /*
3348  * Write lock mark to 8th word of sector0 of page0 of the spare0.
3349  * We write 16 bytes spare area instead of 2 bytes.
3350  * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
3351  * main area of page 49.
3352  */
3353 
3354  from = 0;
3355  len = FLEXONENAND(this) ? mtd->writesize : 16;
3356 
3357  /*
3358  * Note: OTP lock operation
3359  * OTP block : 0xXXFC XX 1111 1100
3360  * 1st block : 0xXXF3 (If chip support) XX 1111 0011
3361  * Both : 0xXXF0 (If chip support) XX 1111 0000
3362  */
3363  if (FLEXONENAND(this))
3364  otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
3365 
3366  /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
3367  if (otp == 1)
3368  buf[otp_lock_offset] = 0xFC;
3369  else if (otp == 2)
3370  buf[otp_lock_offset] = 0xF3;
3371  else if (otp == 3)
3372  buf[otp_lock_offset] = 0xF0;
3373  else if (otp != 0)
3374  printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
3375 
3376  ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
3377 
3378  return ret ? : retlen;
3379 }
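/*
 * Illustrative sketch (not part of the driver): touching the user OTP
 * area through the generic wrappers that land in the
 * onenand_*_user_prot_reg() handlers above.  OTP programming is one-time
 * and mtd_lock_user_prot_reg() is irreversible, so this only outlines
 * the call sequence; offset 0 and the payload are made up.
 */
static int example_user_otp(struct mtd_info *mtd)
{
    u_char serial[16] = "EXAMPLE-SERIAL";   /* made-up payload */
    u_char check[16];
    size_t retlen;
    int ret;

    ret = mtd_write_user_prot_reg(mtd, 0, sizeof(serial), &retlen, serial);
    if (ret)
        return ret;

    ret = mtd_read_user_prot_reg(mtd, 0, sizeof(check), &retlen, check);
    if (ret)
        return ret;

    /* Permanently lock the user OTP area once the data is verified. */
    return mtd_lock_user_prot_reg(mtd, 0, sizeof(serial));
}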
3380 
3381 #endif /* CONFIG_MTD_ONENAND_OTP */
3382 
3391 static void onenand_check_features(struct mtd_info *mtd)
3392 {
3393  struct onenand_chip *this = mtd->priv;
3394  unsigned int density, process, numbufs;
3395 
3396  /* Lock scheme depends on density and process */
3397  density = onenand_get_density(this->device_id);
3398  process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
3399  numbufs = this->read_word(this->base + ONENAND_REG_NUM_BUFFERS) >> 8;
3400 
3401  /* Lock scheme */
3402  switch (density) {
3403  case ONENAND_DEVICE_DENSITY_4Gb:
3404  if (ONENAND_IS_DDP(this))
3405  this->options |= ONENAND_HAS_2PLANE;
3406  else if (numbufs == 1) {
3407  this->options |= ONENAND_HAS_4KB_PAGE;
3408  this->options |= ONENAND_HAS_CACHE_PROGRAM;
3409  /*
3410  * There are two different 4KiB pagesize chips
3411  * and no way to detect it by H/W config values.
3412  *
3413  * To detect the correct NOP for each chip,
3414  * check the version ID as a workaround.
3415  *
3416  * The currently known values are:
3417  * KFM4G16Q4M has NOP 4 with version ID 0x0131
3418  * KFM4G16Q5M has NOP 1 with version ID 0x013e
3419  */
3420  if ((this->version_id & 0xf) == 0xe)
3421  this->options |= ONENAND_HAS_NOP_1;
3422  }
3423 
3424  case ONENAND_DEVICE_DENSITY_2Gb:
3425  /* 2Gb DDP does not have 2 plane */
3426  if (!ONENAND_IS_DDP(this))
3427  this->options |= ONENAND_HAS_2PLANE;
3428  this->options |= ONENAND_HAS_UNLOCK_ALL;
3429 
3430  case ONENAND_DEVICE_DENSITY_1Gb:
3431  /* A-Die has all block unlock */
3432  if (process)
3433  this->options |= ONENAND_HAS_UNLOCK_ALL;
3434  break;
3435 
3436  default:
3437  /* Some OneNAND has continuous lock scheme */
3438  if (!process)
3439  this->options |= ONENAND_HAS_CONT_LOCK;
3440  break;
3441  }
3442 
3443  /* The MLC has 4KiB pagesize. */
3444  if (ONENAND_IS_MLC(this))
3445  this->options |= ONENAND_HAS_4KB_PAGE;
3446 
3447  if (ONENAND_IS_4KB_PAGE(this))
3448  this->options &= ~ONENAND_HAS_2PLANE;
3449 
3450  if (FLEXONENAND(this)) {
3451  this->options &= ~ONENAND_HAS_CONT_LOCK;
3452  this->options |= ONENAND_HAS_UNLOCK_ALL;
3453  }
3454 
3455  if (this->options & ONENAND_HAS_CONT_LOCK)
3456  printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
3457  if (this->options & ONENAND_HAS_UNLOCK_ALL)
3458  printk(KERN_DEBUG "Chip support all block unlock\n");
3459  if (this->options & ONENAND_HAS_2PLANE)
3460  printk(KERN_DEBUG "Chip has 2 plane\n");
3461  if (this->options & ONENAND_HAS_4KB_PAGE)
3462  printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
3463  if (this->options & ONENAND_HAS_CACHE_PROGRAM)
3464  printk(KERN_DEBUG "Chip has cache program feature\n");
3465 }
3466 
3474 static void onenand_print_device_info(int device, int version)
3475 {
3476  int vcc, demuxed, ddp, density, flexonenand;
3477 
3478  vcc = device & ONENAND_DEVICE_VCC_MASK;
3479  demuxed = device & ONENAND_DEVICE_IS_DEMUX;
3480  ddp = device & ONENAND_DEVICE_IS_DDP;
3481  density = onenand_get_density(device);
3482  flexonenand = device & DEVICE_IS_FLEXONENAND;
3483  printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
3484  demuxed ? "" : "Muxed ",
3485  flexonenand ? "Flex-" : "",
3486  ddp ? "(DDP)" : "",
3487  (16 << density),
3488  vcc ? "2.65/3.3" : "1.8",
3489  device);
3490  printk(KERN_INFO "OneNAND version = 0x%04x\n", version);
3491 }
3492 
3493 static const struct onenand_manufacturers onenand_manuf_ids[] = {
3494  {ONENAND_MFR_SAMSUNG, "Samsung"},
3495  {ONENAND_MFR_NUMONYX, "Numonyx"},
3496 };
3497 
3504 static int onenand_check_maf(int manuf)
3505 {
3506  int size = ARRAY_SIZE(onenand_manuf_ids);
3507  char *name;
3508  int i;
3509 
3510  for (i = 0; i < size; i++)
3511  if (manuf == onenand_manuf_ids[i].id)
3512  break;
3513 
3514  if (i < size)
3515  name = onenand_manuf_ids[i].name;
3516  else
3517  name = "Unknown";
3518 
3519  printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf);
3520 
3521  return (i == size);
3522 }
3523 
3528 static int flexonenand_get_boundary(struct mtd_info *mtd)
3529 {
3530  struct onenand_chip *this = mtd->priv;
3531  unsigned die, bdry;
3532  int ret, syscfg, locked;
3533 
3534  /* Disable ECC */
3535  syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
3536  this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
3537 
3538  for (die = 0; die < this->dies; die++) {
3539  this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
3540  this->wait(mtd, FL_SYNCING);
3541 
3542  this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
3543  ret = this->wait(mtd, FL_READING);
3544 
3545  bdry = this->read_word(this->base + ONENAND_DATARAM);
3546  if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
3547  locked = 0;
3548  else
3549  locked = 1;
3550  this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
3551 
3552  this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3553  ret = this->wait(mtd, FL_RESETING);
3554 
3555  printk(KERN_INFO "Die %d boundary: %d%s\n", die,
3556  this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
3557  }
3558 
3559  /* Enable ECC */
3560  this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
3561  return 0;
3562 }
3563 
3569 static void flexonenand_get_size(struct mtd_info *mtd)
3570 {
3571  struct onenand_chip *this = mtd->priv;
3572  int die, i, eraseshift, density;
3573  int blksperdie, maxbdry;
3574  loff_t ofs;
3575 
3576  density = onenand_get_density(this->device_id);
3577  blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
3578  blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
3579  maxbdry = blksperdie - 1;
3580  eraseshift = this->erase_shift - 1;
3581 
3582  mtd->numeraseregions = this->dies << 1;
3583 
3584  /* This fills up the device boundary */
3585  flexonenand_get_boundary(mtd);
3586  die = ofs = 0;
3587  i = -1;
3588  for (; die < this->dies; die++) {
3589  if (!die || this->boundary[die-1] != maxbdry) {
3590  i++;
3591  mtd->eraseregions[i].offset = ofs;
3592  mtd->eraseregions[i].erasesize = 1 << eraseshift;
3593  mtd->eraseregions[i].numblocks =
3594  this->boundary[die] + 1;
3595  ofs += mtd->eraseregions[i].numblocks << eraseshift;
3596  eraseshift++;
3597  } else {
3598  mtd->numeraseregions -= 1;
3599  mtd->eraseregions[i].numblocks +=
3600  this->boundary[die] + 1;
3601  ofs += (this->boundary[die] + 1) << (eraseshift - 1);
3602  }
3603  if (this->boundary[die] != maxbdry) {
3604  i++;
3605  mtd->eraseregions[i].offset = ofs;
3606  mtd->eraseregions[i].erasesize = 1 << eraseshift;
3607  mtd->eraseregions[i].numblocks = maxbdry ^
3608  this->boundary[die];
3609  ofs += mtd->eraseregions[i].numblocks << eraseshift;
3610  eraseshift--;
3611  } else
3612  mtd->numeraseregions -= 1;
3613  }
3614 
3615  /* Expose MLC erase size except when all blocks are SLC */
3616  mtd->erasesize = 1 << this->erase_shift;
3617  if (mtd->numeraseregions == 1)
3618  mtd->erasesize >>= 1;
3619 
3620  printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
3621  for (i = 0; i < mtd->numeraseregions; i++)
3622  printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
3623  " numblocks: %04u]\n",
3624  (unsigned int) mtd->eraseregions[i].offset,
3625  mtd->eraseregions[i].erasesize,
3626  mtd->eraseregions[i].numblocks);
3627 
3628  for (die = 0, mtd->size = 0; die < this->dies; die++) {
3629  this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
3630  this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
3631  << (this->erase_shift - 1);
3632  mtd->size += this->diesize[die];
3633  }
3634 }
3635 
3650 static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
3651 {
3652  struct onenand_chip *this = mtd->priv;
3653  int i, ret;
3654  int block;
3655  struct mtd_oob_ops ops = {
3656  .mode = MTD_OPS_PLACE_OOB,
3657  .ooboffs = 0,
3658  .ooblen = mtd->oobsize,
3659  .datbuf = NULL,
3660  .oobbuf = this->oob_buf,
3661  };
3662  loff_t addr;
3663 
3664  printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
3665 
3666  for (block = start; block <= end; block++) {
3667  addr = flexonenand_addr(this, block);
3668  if (onenand_block_isbad_nolock(mtd, addr, 0))
3669  continue;
3670 
3671  /*
3672  * Since main area write results in ECC write to spare,
3673  * it is sufficient to check only ECC bytes for change.
3674  */
3675  ret = onenand_read_oob_nolock(mtd, addr, &ops);
3676  if (ret)
3677  return ret;
3678 
3679  for (i = 0; i < mtd->oobsize; i++)
3680  if (this->oob_buf[i] != 0xff)
3681  break;
3682 
3683  if (i != mtd->oobsize) {
3684  printk(KERN_WARNING "%s: Block %d not erased.\n",
3685  __func__, block);
3686  return 1;
3687  }
3688  }
3689 
3690  return 0;
3691 }
3692 
3697 static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3698  int boundary, int lock)
3699 {
3700  struct onenand_chip *this = mtd->priv;
3701  int ret, density, blksperdie, old, new, thisboundary;
3702  loff_t addr;
3703 
3704  /* Change only once for SDP Flex-OneNAND */
3705  if (die && (!ONENAND_IS_DDP(this)))
3706  return 0;
3707 
3708  /* boundary value of -1 indicates no required change */
3709  if (boundary < 0 || boundary == this->boundary[die])
3710  return 0;
3711 
3712  density = onenand_get_density(this->device_id);
3713  blksperdie = ((16 << density) << 20) >> this->erase_shift;
3714  blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
3715 
3716  if (boundary >= blksperdie) {
3717  printk(KERN_ERR "%s: Invalid boundary value. "
3718  "Boundary not changed.\n", __func__);
3719  return -EINVAL;
3720  }
3721 
3722  /* Check if converting blocks are erased */
3723  old = this->boundary[die] + (die * this->density_mask);
3724  new = boundary + (die * this->density_mask);
3725  ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
3726  if (ret) {
3727  printk(KERN_ERR "%s: Please erase blocks "
3728  "before boundary change\n", __func__);
3729  return ret;
3730  }
3731 
3732  this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
3733  this->wait(mtd, FL_SYNCING);
3734 
3735  /* Check if the boundary is locked */
3736  this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
3737  ret = this->wait(mtd, FL_READING);
3738 
3739  thisboundary = this->read_word(this->base + ONENAND_DATARAM);
3740  if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
3741  printk(KERN_ERR "%s: boundary locked\n", __func__);
3742  ret = 1;
3743  goto out;
3744  }
3745 
3746  printk(KERN_INFO "Changing die %d boundary: %d%s\n",
3747  die, boundary, lock ? "(Locked)" : "(Unlocked)");
3748 
3749  addr = die ? this->diesize[0] : 0;
3750 
3751  boundary &= FLEXONENAND_PI_MASK;
3752  boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
3753 
3754  this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
3755  ret = this->wait(mtd, FL_ERASING);
3756  if (ret) {
3757  printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
3758  __func__, die);
3759  goto out;
3760  }
3761 
3762  this->write_word(boundary, this->base + ONENAND_DATARAM);
3763  this->command(mtd, ONENAND_CMD_PROG, addr, 0);
3764  ret = this->wait(mtd, FL_WRITING);
3765  if (ret) {
3766  printk(KERN_ERR "%s: Failed PI write for Die %d\n",
3767  __func__, die);
3768  goto out;
3769  }
3770 
3771  this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
3772  ret = this->wait(mtd, FL_WRITING);
3773 out:
3774  this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3775  this->wait(mtd, FL_RESETING);
3776  if (!ret)
3777  /* Recalculate device size on boundary change*/
3778  flexonenand_get_size(mtd);
3779 
3780  return ret;
3781 }
3782 
3790 static int onenand_chip_probe(struct mtd_info *mtd)
3791 {
3792  struct onenand_chip *this = mtd->priv;
3793  int bram_maf_id, bram_dev_id, maf_id, dev_id;
3794  int syscfg;
3795 
3796  /* Save system configuration 1 */
3797  syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
3798  /* Clear Sync. Burst Read mode to read BootRAM */
3799  this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
3800 
3801  /* Send the command for reading device ID from BootRAM */
3802  this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
3803 
3804  /* Read manufacturer and device IDs from BootRAM */
3805  bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0);
3806  bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2);
3807 
3808  /* Reset OneNAND to read default register values */
3809  this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
3810  /* Wait reset */
3811  this->wait(mtd, FL_RESETING);
3812 
3813  /* Restore system configuration 1 */
3814  this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
3815 
3816  /* Check manufacturer ID */
3817  if (onenand_check_maf(bram_maf_id))
3818  return -ENXIO;
3819 
3820  /* Read manufacturer and device IDs from Register */
3821  maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
3822  dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
3823 
3824  /* Check OneNAND device */
3825  if (maf_id != bram_maf_id || dev_id != bram_dev_id)
3826  return -ENXIO;
3827 
3828  return 0;
3829 }
3830 
3835 static int onenand_probe(struct mtd_info *mtd)
3836 {
3837  struct onenand_chip *this = mtd->priv;
3838  int maf_id, dev_id, ver_id;
3839  int density;
3840  int ret;
3841 
3842  ret = this->chip_probe(mtd);
3843  if (ret)
3844  return ret;
3845 
3846  /* Read manufacturer and device IDs from Register */
3847  maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
3848  dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
3849  ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
3850  this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
3851 
3852  /* Flash device information */
3853  onenand_print_device_info(dev_id, ver_id);
3854  this->device_id = dev_id;
3855  this->version_id = ver_id;
3856 
3857  /* Check OneNAND features */
3858  onenand_check_features(mtd);
3859 
3860  density = onenand_get_density(dev_id);
3861  if (FLEXONENAND(this)) {
3862  this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
3863  /* Maximum possible erase regions */
3864  mtd->numeraseregions = this->dies << 1;
3865  mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
3866  * (this->dies << 1), GFP_KERNEL);
3867  if (!mtd->eraseregions)
3868  return -ENOMEM;
3869  }
3870 
3871  /*
3872  * For Flex-OneNAND, chipsize represents maximum possible device size.
3873  * mtd->size represents the actual device size.
3874  */
3875  this->chipsize = (16 << density) << 20;
3876 
3877  /* OneNAND page size & block size */
3878  /* The data buffer size is equal to page size */
3879  mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
3880  /* We use the full BufferRAM */
3881  if (ONENAND_IS_4KB_PAGE(this))
3882  mtd->writesize <<= 1;
3883 
3884  mtd->oobsize = mtd->writesize >> 5;
3885  /* Pages per block are always 64 in OneNAND */
3886  mtd->erasesize = mtd->writesize << 6;
3887  /*
3888  * Flex-OneNAND SLC area has 64 pages per block.
3889  * Flex-OneNAND MLC area has 128 pages per block.
3890  * Expose MLC erase size to find erase_shift and page_mask.
3891  */
3892  if (FLEXONENAND(this))
3893  mtd->erasesize <<= 1;
3894 
3895  this->erase_shift = ffs(mtd->erasesize) - 1;
3896  this->page_shift = ffs(mtd->writesize) - 1;
3897  this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
3898  /* Set density mask. it is used for DDP */
3899  if (ONENAND_IS_DDP(this))
3900  this->density_mask = this->chipsize >> (this->erase_shift + 1);
3901  /* It's real page size */
3902  this->writesize = mtd->writesize;
3903 
3904  /* REVISIT: Multichip handling */
3905 
3906  if (FLEXONENAND(this))
3907  flexonenand_get_size(mtd);
3908  else
3909  mtd->size = this->chipsize;
3910 
3911  /*
3912  * We emulate the 4KiB page and 256KiB erase block size
3913  * But oobsize is still 64 bytes.
3914  * It is only valid if you turn on 2X program support,
3915  * Otherwise it will be ignored by compiler.
3916  */
3917  if (ONENAND_IS_2PLANE(this)) {
3918  mtd->writesize <<= 1;
3919  mtd->erasesize <<= 1;
3920  }
3921 
3922  return 0;
3923 }
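/*
 * Worked example of the geometry derived above, assuming a typical muxed
 * 2Gb OneNAND part with a 2KiB data buffer (other densities and
 * 4KiB-page parts differ):
 *
 *   density        = 4                     -> chipsize    = (16 << 4) << 20 = 256 MiB
 *   mtd->writesize = 2048                  -> page_shift  = ffs(2048) - 1   = 11
 *   mtd->oobsize   = 2048 >> 5 = 64
 *   mtd->erasesize = 2048 << 6 = 128 KiB   -> erase_shift = ffs(131072) - 1 = 17
 *   page_mask      = (1 << (17 - 11)) - 1 = 63   (64 pages per block)
 */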
3924 
3929 static int onenand_suspend(struct mtd_info *mtd)
3930 {
3931  return onenand_get_device(mtd, FL_PM_SUSPENDED);
3932 }
3933 
3938 static void onenand_resume(struct mtd_info *mtd)
3939 {
3940  struct onenand_chip *this = mtd->priv;
3941 
3942  if (this->state == FL_PM_SUSPENDED)
3943  onenand_release_device(mtd);
3944  else
3945  printk(KERN_ERR "%s: resume() called for the chip which is not "
3946  "in suspended state\n", __func__);
3947 }
3948 
3959 int onenand_scan(struct mtd_info *mtd, int maxchips)
3960 {
3961  int i, ret;
3962  struct onenand_chip *this = mtd->priv;
3963 
3964  if (!this->read_word)
3965  this->read_word = onenand_readw;
3966  if (!this->write_word)
3967  this->write_word = onenand_writew;
3968 
3969  if (!this->command)
3970  this->command = onenand_command;
3971  if (!this->wait)
3972  onenand_setup_wait(mtd);
3973  if (!this->bbt_wait)
3974  this->bbt_wait = onenand_bbt_wait;
3975  if (!this->unlock_all)
3976  this->unlock_all = onenand_unlock_all;
3977 
3978  if (!this->chip_probe)
3979  this->chip_probe = onenand_chip_probe;
3980 
3981  if (!this->read_bufferram)
3982  this->read_bufferram = onenand_read_bufferram;
3983  if (!this->write_bufferram)
3984  this->write_bufferram = onenand_write_bufferram;
3985 
3986  if (!this->block_markbad)
3987  this->block_markbad = onenand_default_block_markbad;
3988  if (!this->scan_bbt)
3989  this->scan_bbt = onenand_default_bbt;
3990 
3991  if (onenand_probe(mtd))
3992  return -ENXIO;
3993 
3994  /* Set Sync. Burst Read after probing */
3995  if (this->mmcontrol) {
3996  printk(KERN_INFO "OneNAND Sync. Burst Read support\n");
3997  this->read_bufferram = onenand_sync_read_bufferram;
3998  }
3999 
4000  /* Allocate buffers, if necessary */
4001  if (!this->page_buf) {
4002  this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
4003  if (!this->page_buf) {
4004  printk(KERN_ERR "%s: Can't allocate page_buf\n",
4005  __func__);
4006  return -ENOMEM;
4007  }
4008 #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
4009  this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
4010  if (!this->verify_buf) {
4011  kfree(this->page_buf);
4012  return -ENOMEM;
4013  }
4014 #endif
4015  this->options |= ONENAND_PAGEBUF_ALLOC;
4016  }
4017  if (!this->oob_buf) {
4018  this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
4019  if (!this->oob_buf) {
4020  printk(KERN_ERR "%s: Can't allocate oob_buf\n",
4021  __func__);
4022  if (this->options & ONENAND_PAGEBUF_ALLOC) {
4023  this->options &= ~ONENAND_PAGEBUF_ALLOC;
4024  kfree(this->page_buf);
4025  }
4026  return -ENOMEM;
4027  }
4028  this->options |= ONENAND_OOBBUF_ALLOC;
4029  }
4030 
4031  this->state = FL_READY;
4032  init_waitqueue_head(&this->wq);
4033  spin_lock_init(&this->chip_lock);
4034 
4035  /*
4036  * Allow subpage writes up to oobsize.
4037  */
4038  switch (mtd->oobsize) {
4039  case 128:
4040  if (FLEXONENAND(this)) {
4041  this->ecclayout = &flexonenand_oob_128;
4042  mtd->subpage_sft = 0;
4043  } else {
4044  this->ecclayout = &onenand_oob_128;
4045  mtd->subpage_sft = 2;
4046  }
4047  if (ONENAND_IS_NOP_1(this))
4048  mtd->subpage_sft = 0;
4049  break;
4050  case 64:
4051  this->ecclayout = &onenand_oob_64;
4052  mtd->subpage_sft = 2;
4053  break;
4054 
4055  case 32:
4056  this->ecclayout = &onenand_oob_32;
4057  mtd->subpage_sft = 1;
4058  break;
4059 
4060  default:
4061  printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
4062  __func__, mtd->oobsize);
4063  mtd->subpage_sft = 0;
4064  /* To prevent kernel oops */
4065  this->ecclayout = &onenand_oob_32;
4066  break;
4067  }
4068 
4069  this->subpagesize = mtd->writesize >> mtd->subpage_sft;
4070 
4071  /*
4072  * The number of bytes available for a client to place data into
4073  * the out of band area
4074  */
4075  this->ecclayout->oobavail = 0;
4076  for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
4077  this->ecclayout->oobfree[i].length; i++)
4078  this->ecclayout->oobavail +=
4079  this->ecclayout->oobfree[i].length;
4080  mtd->oobavail = this->ecclayout->oobavail;
4081 
4082  mtd->ecclayout = this->ecclayout;
4083  mtd->ecc_strength = 1;
4084 
4085  /* Fill in remaining MTD driver data */
4086  mtd->type = MTD_NANDFLASH;
4087  mtd->flags = MTD_CAP_NANDFLASH;
4088  mtd->_erase = onenand_erase;
4089  mtd->_point = NULL;
4090  mtd->_unpoint = NULL;
4091  mtd->_read = onenand_read;
4092  mtd->_write = onenand_write;
4093  mtd->_read_oob = onenand_read_oob;
4094  mtd->_write_oob = onenand_write_oob;
4095  mtd->_panic_write = onenand_panic_write;
4096 #ifdef CONFIG_MTD_ONENAND_OTP
4097  mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
4098  mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
4099  mtd->_get_user_prot_info = onenand_get_user_prot_info;
4100  mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
4101  mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
4102  mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
4103 #endif
4104  mtd->_sync = onenand_sync;
4105  mtd->_lock = onenand_lock;
4106  mtd->_unlock = onenand_unlock;
4107  mtd->_suspend = onenand_suspend;
4108  mtd->_resume = onenand_resume;
4109  mtd->_block_isbad = onenand_block_isbad;
4110  mtd->_block_markbad = onenand_block_markbad;
4111  mtd->owner = THIS_MODULE;
4112  mtd->writebufsize = mtd->writesize;
4113 
4114  /* Unlock whole block */
4115  if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
4116  this->unlock_all(mtd);
4117 
4118  ret = this->scan_bbt(mtd);
4119  if ((!FLEXONENAND(this)) || ret)
4120  return ret;
4121 
4122  /* Change Flex-OneNAND boundaries if required */
4123  for (i = 0; i < MAX_DIES; i++)
4124  flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
4125  flex_bdry[(2 * i) + 1]);
4126 
4127  return 0;
4128 }
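/*
 * Illustrative sketch (not part of the driver): the minimal sequence a
 * board/bus driver follows around onenand_scan() and onenand_release();
 * see drivers/mtd/onenand/generic.c for a complete example.  The mapped
 * register base and device name are placeholders, and error/removal
 * handling is omitted for brevity.
 */
struct example_onenand {
    struct mtd_info     mtd;
    struct onenand_chip onenand;
};

static int example_onenand_init(void __iomem *base)
{
    struct example_onenand *info;
    int ret;

    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info)
        return -ENOMEM;

    info->onenand.base = base;          /* ioremapped OneNAND window */
    info->mtd.priv = &info->onenand;    /* onenand_scan() expects this */
    info->mtd.name = "example-onenand";

    ret = onenand_scan(&info->mtd, 1);
    if (ret) {
        kfree(info);
        return ret;
    }

    /* Register the whole device; a partition table could be passed instead. */
    return mtd_device_register(&info->mtd, NULL, 0);
}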
4129 
4134 void onenand_release(struct mtd_info *mtd)
4135 {
4136  struct onenand_chip *this = mtd->priv;
4137 
4138  /* Deregister partitions */
4139  mtd_device_unregister(mtd);
4140 
4141  /* Free bad block table memory, if allocated */
4142  if (this->bbm) {
4143  struct bbm_info *bbm = this->bbm;
4144  kfree(bbm->bbt);
4145  kfree(this->bbm);
4146  }
4147  /* Buffers allocated by onenand_scan */
4148  if (this->options & ONENAND_PAGEBUF_ALLOC) {
4149  kfree(this->page_buf);
4150 #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
4151  kfree(this->verify_buf);
4152 #endif
4153  }
4154  if (this->options & ONENAND_OOBBUF_ALLOC)
4155  kfree(this->oob_buf);
4156  kfree(mtd->eraseregions);
4157 }
4158 
4159 EXPORT_SYMBOL_GPL(onenand_scan);
4160 EXPORT_SYMBOL_GPL(onenand_release);
4161 
4162 MODULE_LICENSE("GPL");
4163 MODULE_AUTHOR("Kyungmin Park <[email protected]>");
4164 MODULE_DESCRIPTION("Generic OneNAND flash driver code");