Linux Kernel 3.7.1
omap2.c
/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>

#ifdef CONFIG_MTD_NAND_OMAP_BCH
#include <linux/bch.h>
#endif

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>

#define	DRIVER_NAME	"omap2-nand"
#define	OMAP_NAND_TIMEOUT_MS	5000

#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

#define TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#define	PREFETCH_CONFIG1_CS_SHIFT	24
#define	ECC_CONFIG_CS_SHIFT		1
#define	CS_MASK				0x7
#define	ENABLE_PREFETCH			(0x1 << 7)
#define	DMA_MPU_MODE_SHIFT		2
#define	ECCSIZE1_SHIFT			22
#define	ECC1RESULTSIZE			0x1
#define	ECCCLEAR			0x100
#define	ECC1				0x1

/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks
 */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr bb_descrip_flashbased = {
	.options = NAND_BBT_SCANALLPAGES,
	.offs = 0,
	.len = 1,
	.pattern = scan_ff_pattern,
};


struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;
	unsigned long			phys_base;
	unsigned long			mem_size;
	struct completion		comp;
	struct dma_chan			*dma;
	int				gpmc_irq_fifo;
	int				gpmc_irq_count;
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char				*buf;
	int				buf_len;
	struct gpmc_nand_regs		reg;

#ifdef CONFIG_MTD_NAND_OMAP_BCH
	struct bch_control		*bch;
	struct nand_ecclayout		ecclayout;
#endif
};

/**
 * omap_prefetch_enable - configures and starts prefetch transfer
 * @cs: cs (chip select) number
 * @fifo_th: fifo threshold to be used for read/write
 * @dma_mode: dma mode enable (1) or disable (0)
 * @u32_count: number of bytes to be transferred
 * @is_write: prefetch read(0) or write post(1) mode
 */
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
	unsigned int u32_count, int is_write, struct omap_nand_info *info)
{
	u32 val;

	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
		return -1;

	if (readl(info->reg.gpmc_prefetch_control))
		return -EBUSY;

	/* Set the amount of bytes to be prefetched */
	writel(u32_count, info->reg.gpmc_prefetch_config2);

	/* Set dma/mpu mode, the prefetch read / post write mode and
	 * enable the engine. Also select the chip select that requested it.
	 */
	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
		(dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
	writel(val, info->reg.gpmc_prefetch_config1);

	/* Start the prefetch engine */
	writel(0x1, info->reg.gpmc_prefetch_control);

	return 0;
}
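
/*
 * Worked example (illustrative; assumes the plat/gpmc.h definitions
 * PREFETCH_FIFOTHRESHOLD_MAX = 0x40 and PREFETCH_FIFOTHRESHOLD(x) = (x) << 8):
 * a CPU-driven 2048-byte page read on CS0, e.g.
 *
 *	omap_prefetch_enable(0, PREFETCH_FIFOTHRESHOLD_MAX, 0x0, 2048, 0x0, info);
 *
 * writes 2048 into PREFETCH_CONFIG2 and composes PREFETCH_CONFIG1 as
 *	(0 << 24) | (0x40 << 8) | (0x1 << 7) | (0 << 2) | 0 = 0x00004080,
 * i.e. CS0, FIFO threshold of 0x40, engine enabled, MPU (non-DMA) mode,
 * read direction.
 */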

/**
 * omap_prefetch_reset - disables and stops the prefetch engine
 */
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
{
	u32 config1;

	/* check if the same module/cs is trying to reset */
	config1 = readl(info->reg.gpmc_prefetch_config1);
	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
		return -EINVAL;

	/* Stop the PFPW engine */
	writel(0x0, info->reg.gpmc_prefetch_control);

	/* Reset/disable the PFPW engine */
	writel(0x0, info->reg.gpmc_prefetch_config1);

	return 0;
}

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writeb(cmd, info->reg.gpmc_nand_command);

		else if (ctrl & NAND_ALE)
			writeb(cmd, info->reg.gpmc_nand_address);

		else /* NAND_NCE */
			writeb(cmd, info->reg.gpmc_nand_data);
	}
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;
	u32 status = 0;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = readl(info->reg.gpmc_status) &
					GPMC_STATUS_BUFF_EMPTY;
		} while (!status);
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;
	u32 status = 0;
	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = readl(info->reg.gpmc_status) &
					GPMC_STATUS_BUFF_EMPTY;
		} while (!status);
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;
	u32 val;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting the prefetch */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		do {
			cpu_relax();
			val = readl(info->reg.gpmc_prefetch_status);
			val = GPMC_PREFETCH_STATUS_COUNT(val);
		} while (val && (tim++ < limit));

		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}

/*
 * omap_nand_dma_callback: callback on the completion of dma transfer
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_callback(void *data)
{
	complete((struct completion *) data);
}

/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	struct scatterlist sg;
	unsigned long tim, limit;
	unsigned n;
	int ret;
	u32 val;

	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	init_completion(&info->comp);
	dma_async_issue_pending(info->dma);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));

	do {
		cpu_relax();
		val = readl(info->reg.gpmc_prefetch_status);
		val = GPMC_PREFETCH_STATUS_COUNT(val);
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);

	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
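
/*
 * Note on the high_memory check above (illustrative numbers, PAGE_SIZE = 4096):
 * buffers handed down by filesystems such as JFFS2 may live in vmalloc space,
 * so the transfer first rewrites 'addr' to the kernel linear alias of the
 * backing page. A vmalloc buffer that straddles a page boundary cannot be
 * assumed physically contiguous and falls back to the PIO path at out_copy:
 * a 2048-byte transfer starting 2048 bytes into a page stays within it and is
 * DMA-mapped, while the same transfer starting 3072 bytes in crosses into the
 * next page and is copied by the CPU instead.
 */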

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;

	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		if (this_irq == info->gpmc_irq_count)
			goto done;

		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (this_irq == info->gpmc_irq_count)
			goto done;
	}

	return IRQ_HANDLED;

done:
	complete(&info->comp);

	disable_irq_nosync(info->gpmc_irq_fifo);
	disable_irq_nosync(info->gpmc_irq_count);

	return IRQ_HANDLED;
}
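
/*
 * Example of the FIFO accounting above (illustrative numbers): if the
 * FIFO count read from GPMC_PREFETCH_STATUS is 62 bytes, the 0xFFFC mask
 * rounds it down to 60, so the handler moves 60 >> 2 = 15 words on this
 * interrupt; the remainder is picked up by a later FIFO interrupt or when
 * the count interrupt signals completion.
 */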

/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}

/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;
	u32 val;

	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = omap_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for write to complete */
	wait_for_completion(&info->comp);

	/* wait for data to be flushed out before resetting the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		val = readl(info->reg.gpmc_prefetch_status);
		val = GPMC_PREFETCH_STATUS_COUNT(val);
		cpu_relax();
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}

/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
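
/*
 * Worked example: for an erased page the ECC bytes read from the spare area
 * are all 0xFF, so tmp = 0xFF | (0xFF << 16) | (0xF0 << 20) | (0x0F << 8)
 * = 0x0FFF0FFF, every parity test in the three expressions above sees a set
 * bit, and each output byte becomes ~0xFF = 0x00. This is why the default
 * case of omap_compare_ecc() treats "read ECC was all 0xFF and the true ECC
 * is all zero" as error-free.
 */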

/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected in the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* Uncorrectable error */
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		pr_debug("Correcting single bit ECC error at offset: "
				"%d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		pr_debug("UNCORRECTED_ERROR default\n");
		return -1;
	}
}
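
/*
 * Worked example for the correctable (ecc_sum == 12) case: the 24 XOR bits
 * pair up even/odd parity for 12 parity groups, and a single flipped data
 * bit disturbs exactly one bit of every pair, hence a sum of 12. The
 * odd-numbered bits then spell out the error location: if ecc_bit[23], 21,
 * ..., 7 decode to 0b001100100 = 100 and ecc_bit[5], 3, 1 decode to
 * 0b011 = 3, the statement page_data[100] ^= (1 << 3) repairs the flip.
 */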

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from the nand spare area with the ECC register
 * values and, if they mismatch, calls omap_compare_ecc() for error
 * detection and correction. If there are no errors, %0 is returned. If the
 * number of corrected errors is between 1 and 4, %1 to %4 is returned. If
 * there is an uncorrectable error, %-1 is returned.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
			     u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
			/* keep track of the number of corrected errors */
			stat += ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return stat;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using non-inverted ECC can be considered ugly since writing a blank
 * page (padding) will clear the ECC bytes. This is no problem as long as
 * nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u32 val;

	val = readl(info->reg.gpmc_ecc_config);
	/* bail out if the ECC result belongs to a different chip select */
	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
		return -EINVAL;

	/* read ecc result */
	val = readl(info->reg.gpmc_ecc1_result);
	*ecc_code++ = val;          /* P128e, ..., P1e */
	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	return 0;
}
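
/*
 * Example of the byte packing above: if GPMC_ECC1_RESULT reads 0x12345678,
 * then ecc_code[0] = 0x78 (P128e..P1e), ecc_code[1] = 0x34 (P128o..P1o), and
 * ecc_code[2] = ((0x12345678 >> 8) & 0x0f) | ((0x12345678 >> 20) & 0xf0)
 *             = 0x06 | 0x20 = 0x26
 * (P2048o..P256o in the high nibble, P2048e..P256e in the low nibble).
 */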

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	u32 val;

	/* clear ecc and enable bits */
	val = ECCCLEAR | ECC1;
	writel(val, info->reg.gpmc_ecc_control);

	/* program ecc and result sizes */
	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
			ECC1RESULTSIZE);
	writel(val, info->reg.gpmc_ecc_size_config);

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
		break;
	case NAND_ECC_READSYN:
		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
		break;
	default:
		dev_info(&info->pdev->dev,
			"error: unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
	writel(val, info->reg.gpmc_ecc_config);
}
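
/*
 * Example of the values programmed above for the Hamming configuration used
 * by this driver (ecc.size = 512, 16-bit device on chip select 0):
 *	ECC_SIZE_CONFIG = (((512 >> 1) - 1) << 22) | ECC1RESULTSIZE = 0x3FC00001
 *	ECC_CONFIG      = (1 << 7) | (0 << 1) | 0x1 = 0x81
 * i.e. the ECC size field covers the 512-byte sector, result register 1 is
 * selected, and ECC generation is enabled for a 16-bit column on CS0.
 */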

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * Wait function is called during Program and erase operations and
 * the way it is called from MTD layer, we should wait till the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs.
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
	while (time_before(jiffies, timeo)) {
		status = readb(info->reg.gpmc_nand_data);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}

	status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
	return status;
}

/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	/* bit 8 of GPMC_STATUS reflects the state of the wait pin */
	return (readl(info->reg.gpmc_status) & 0x100) == 0x100;
}

#ifdef CONFIG_MTD_NAND_OMAP_BCH

/**
 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
	int nerrors;
	unsigned int dev_width;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	struct nand_chip *chip = mtd->priv;

	nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	/*
	 * Program GPMC to perform correction on one 512-byte sector at a time.
	 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
	 * gives a slight (5%) performance gain (but requires additional code).
	 */
	(void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
}

/**
 * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 */
static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
				    u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
}

/**
 * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 */
static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
				    u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
}

/**
 * omap3_correct_data_bch - Decode received data and correct errors
 * @mtd: MTD device structure
 * @data: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 */
static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
				  u_char *read_ecc, u_char *calc_ecc)
{
	int i, count;
	/* cannot correct more than 8 errors */
	unsigned int errloc[8];
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);

	count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
			   errloc);
	if (count > 0) {
		/* correct errors */
		for (i = 0; i < count; i++) {
			/* correct data only, not ecc bytes */
			if (errloc[i] < 8*512)
				data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
			pr_debug("corrected bitflip %u\n", errloc[i]);
		}
	} else if (count < 0) {
		pr_err("ecc unrecoverable error\n");
	}
	return count;
}

/**
 * omap3_free_bch - Release BCH ecc resources
 * @mtd: MTD device structure
 */
static void omap3_free_bch(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	if (info->bch) {
		free_bch(info->bch);
		info->bch = NULL;
	}
}

/**
 * omap3_init_bch - Initialize BCH ECC
 * @mtd: MTD device structure
 * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
 */
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
{
	int ret, max_errors;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
	const int hw_errors = 8;
#else
	const int hw_errors = 4;
#endif
	info->bch = NULL;

	max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
	if (max_errors != hw_errors) {
		pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported\n",
		       max_errors, hw_errors);
		goto fail;
	}

	/* initialize GPMC BCH engine */
	ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
	if (ret)
		goto fail;

	/* software bch library is only used to detect and locate errors */
	info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
	if (!info->bch)
		goto fail;

	info->nand.ecc.size = 512;
	info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
	info->nand.ecc.correct = omap3_correct_data_bch;
	info->nand.ecc.mode = NAND_ECC_HW;

	/*
	 * The number of corrected errors in an ecc block that will trigger
	 * block scrubbing defaults to the ecc strength (4 or 8).
	 * Set mtd->bitflip_threshold here to define a custom threshold.
	 */

	if (max_errors == 8) {
		info->nand.ecc.strength = 8;
		info->nand.ecc.bytes = 13;
		info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
	} else {
		info->nand.ecc.strength = 4;
		info->nand.ecc.bytes = 7;
		info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
	}

	pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
	return 0;
fail:
	omap3_free_bch(mtd);
	return -1;
}
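
/*
 * Parameter sanity check for the init above: m = 13 selects GF(2^13), i.e.
 * codewords of up to 2^13 - 1 = 8191 bits, enough for a 512-byte (4096-bit)
 * sector plus parity. Each correctable bit costs m = 13 parity bits, so
 * t = 4 needs 52 bits (rounded up to the 7 ecc.bytes set above) and t = 8
 * needs 104 bits (exactly the 13 ecc.bytes). 0x201b is the primitive
 * polynomial x^13 + x^4 + x^3 + x + 1 implemented by the GPMC BCH hardware.
 */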

/**
 * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
 * @mtd: MTD device structure
 */
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	int i, steps;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	struct nand_ecclayout *layout = &info->ecclayout;

	/* build oob layout */
	steps = mtd->writesize/info->nand.ecc.size;
	layout->eccbytes = steps*info->nand.ecc.bytes;

	/* do not bother creating special oob layouts for small page devices */
	if (mtd->oobsize < 64) {
		pr_err("BCH ecc is not supported on small page devices\n");
		goto fail;
	}

	/* reserve 2 bytes for bad block marker */
	if (layout->eccbytes+2 > mtd->oobsize) {
		pr_err("no oob layout available for oobsize %d eccbytes %u\n",
		       mtd->oobsize, layout->eccbytes);
		goto fail;
	}

	/* put ecc bytes at oob tail */
	for (i = 0; i < layout->eccbytes; i++)
		layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;

	layout->oobfree[0].offset = 2;
	layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
	info->nand.ecc.layout = layout;

	if (!(info->nand.options & NAND_BUSWIDTH_16))
		info->nand.badblock_pattern = &bb_descrip_flashbased;
	return 0;
fail:
	omap3_free_bch(mtd);
	return -1;
}
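
/*
 * Example layout produced above for a 2048-byte page with 64 bytes of OOB
 * and BCH8 (ecc.size = 512, ecc.bytes = 13): steps = 2048 / 512 = 4, so
 * eccbytes = 4 * 13 = 52, the ECC occupies OOB offsets 12..63, and the free
 * region is offsets 2..11 (length 64 - 2 - 52 = 10) after the two-byte bad
 * block marker.
 */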

#else
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
{
	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
	return -1;
}
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	return -1;
}
static void omap3_free_bch(struct mtd_info *mtd)
{
}
#endif /* CONFIG_MTD_NAND_OMAP_BCH */

static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info		*info;
	struct omap_nand_platform_data	*pdata;
	int				err;
	int				i, offset;
	dma_cap_mask_t			mask;
	unsigned			sig;
	struct resource			*res;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs = pdata->cs;
	info->reg = pdata->reg;

	info->mtd.priv = &info->nand;
	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.owner = THIS_MODULE;

	info->nand.options = pdata->devsize;
	info->nand.options |= NAND_SKIP_BBTSCAN;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -EINVAL;
		dev_err(&pdev->dev, "error getting memory resource\n");
		goto out_free_info;
	}

	info->phys_base = res->start;
	info->mem_size = resource_size(res);

	if (!request_mem_region(info->phys_base, info->mem_size,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to the OMAP, use the omap ready
	 * function and the generic nand_wait function, which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay, slightly longer than tR (AC Timing) of the NAND device,
	 * and poll the status register until failure or success.
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	switch (pdata->xfer_type) {
	case NAND_OMAP_PREFETCH_POLLED:
		info->nand.read_buf = omap_read_buf_pref;
		info->nand.write_buf = omap_write_buf_pref;
		break;

	case NAND_OMAP_POLLED:
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf = omap_read_buf16;
			info->nand.write_buf = omap_write_buf16;
		} else {
			info->nand.read_buf = omap_read_buf8;
			info->nand.write_buf = omap_write_buf8;
		}
		break;

	case NAND_OMAP_PREFETCH_DMA:
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		sig = OMAP24XX_DMA_GPMC;
		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
		if (!info->dma) {
			dev_err(&pdev->dev, "DMA engine request failed\n");
			err = -ENXIO;
			goto out_release_mem_region;
		} else {
			struct dma_slave_config cfg;

			memset(&cfg, 0, sizeof(cfg));
			cfg.src_addr = info->phys_base;
			cfg.dst_addr = info->phys_base;
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.src_maxburst = 16;
			cfg.dst_maxburst = 16;
			err = dmaengine_slave_config(info->dma, &cfg);
			if (err) {
				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
					err);
				goto out_release_mem_region;
			}
			info->nand.read_buf = omap_read_buf_dma_pref;
			info->nand.write_buf = omap_write_buf_dma_pref;
		}
		break;

	case NAND_OMAP_PREFETCH_IRQ:
		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
		if (info->gpmc_irq_fifo <= 0) {
			dev_err(&pdev->dev, "error getting fifo irq\n");
			err = -ENODEV;
			goto out_release_mem_region;
		}
		err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
					IRQF_SHARED, "gpmc-nand-fifo", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
						info->gpmc_irq_fifo, err);
			info->gpmc_irq_fifo = 0;
			goto out_release_mem_region;
		}

		info->gpmc_irq_count = platform_get_irq(pdev, 1);
		if (info->gpmc_irq_count <= 0) {
			dev_err(&pdev->dev, "error getting count irq\n");
			err = -ENODEV;
			goto out_release_mem_region;
		}
		err = request_irq(info->gpmc_irq_count, omap_nand_irq,
					IRQF_SHARED, "gpmc-nand-count", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
						info->gpmc_irq_count, err);
			info->gpmc_irq_count = 0;
			goto out_release_mem_region;
		}

		info->nand.read_buf = omap_read_buf_irq_pref;
		info->nand.write_buf = omap_write_buf_irq_pref;

		break;

	default:
		dev_err(&pdev->dev,
			"xfer_type(%d) not supported!\n", pdata->xfer_type);
		err = -EINVAL;
		goto out_release_mem_region;
	}

	/* select the ecc type */
	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
		info->nand.ecc.mode = NAND_ECC_SOFT;
	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
		info->nand.ecc.bytes = 3;
		info->nand.ecc.size = 512;
		info->nand.ecc.strength = 1;
		info->nand.ecc.calculate = omap_calculate_ecc;
		info->nand.ecc.hwctl = omap_enable_hwecc;
		info->nand.ecc.correct = omap_correct_data;
		info->nand.ecc.mode = NAND_ECC_HW;
	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
		err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
		if (err) {
			err = -EINVAL;
			goto out_release_mem_region;
		}
	}

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash. Try the other width if the first try fails.
	 */
	if (nand_scan_ident(&info->mtd, 1, NULL)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan_ident(&info->mtd, 1, NULL)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

	/* rom code layout */
	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {

		if (info->nand.options & NAND_BUSWIDTH_16)
			offset = 2;
		else {
			offset = 1;
			info->nand.badblock_pattern = &bb_descrip_flashbased;
		}
		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
		for (i = 0; i < omap_oobinfo.eccbytes; i++)
			omap_oobinfo.eccpos[i] = i+offset;

		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
		omap_oobinfo.oobfree->length = info->mtd.oobsize -
					(offset + omap_oobinfo.eccbytes);

		info->nand.ecc.layout = &omap_oobinfo;
	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
		/* build OOB layout for BCH ECC correction */
		err = omap3_init_bch_tail(&info->mtd);
		if (err) {
			err = -EINVAL;
			goto out_release_mem_region;
		}
	}

	/* second phase scan */
	if (nand_scan_tail(&info->mtd)) {
		err = -ENXIO;
		goto out_release_mem_region;
	}

	mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
				  pdata->nr_parts);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	if (info->dma)
		dma_release_channel(info->dma);
	if (info->gpmc_irq_count > 0)
		free_irq(info->gpmc_irq_count, info);
	if (info->gpmc_irq_fifo > 0)
		free_irq(info->gpmc_irq_fifo, info);
	release_mem_region(info->phys_base, info->mem_size);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	omap3_free_bch(&info->mtd);

	platform_set_drvdata(pdev, NULL);
	if (info->dma)
		dma_release_channel(info->dma);

	if (info->gpmc_irq_count > 0)
		free_irq(info->gpmc_irq_count, info);
	if (info->gpmc_irq_fifo > 0)
		free_irq(info->gpmc_irq_fifo, info);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand.IO_ADDR_R);
	release_mem_region(info->phys_base, info->mem_size);
	kfree(info);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");