#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#ifdef CONFIG_MTD_NAND_OMAP_BCH
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
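/* Even (Pxe) and odd (Pxo) parity bits of the GPMC 1-bit Hamming ECC. */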
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)
#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)
#define TF(value) (value ? 1 : 0)
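/*
 * Each P*() helper tests one parity flag and shifts it to the bit position
 * it occupies in the corresponding ECC result byte.
 */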
#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
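/* Bit fields of the GPMC prefetch engine and ECC configuration registers. */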
#define PREFETCH_CONFIG1_CS_SHIFT 24
#define ECC_CONFIG_CS_SHIFT 1
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE_SHIFT 2
#define ECCSIZE1_SHIFT 22
#define ECC1RESULTSIZE 0x1
#define ECCCLEAR 0x100
static uint8_t scan_ff_pattern[] = { 0xff };
	.pattern = scan_ff_pattern,
#ifdef CONFIG_MTD_NAND_OMAP_BCH
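/*
 * omap_prefetch_enable - program and start the GPMC prefetch engine for a
 * transfer on the given chip select; fails if the engine is already in use.
 */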
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
	if (readl(info->reg.gpmc_prefetch_control))
	writel(u32_count, info->reg.gpmc_prefetch_config2);
	writel(val, info->reg.gpmc_prefetch_config1);
	writel(0x1, info->reg.gpmc_prefetch_control);
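/* omap_prefetch_reset - stop the prefetch engine and clear its configuration. */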
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
	config1 = readl(info->reg.gpmc_prefetch_config1);
	writel(0x0, info->reg.gpmc_prefetch_control);
	writel(0x0, info->reg.gpmc_prefetch_config1);
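/*
 * omap_hwcontrol - cmd_ctrl hook: route command and address bytes to the
 * GPMC NAND command/address registers.
 */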
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
		writeb(cmd, info->reg.gpmc_nand_command);
		writeb(cmd, info->reg.gpmc_nand_address);
	status = readl(info->reg.gpmc_status) &
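/* omap_read_buf16 - read data from the NAND controller into buffer (16-bit bus). */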
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
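/* omap_write_buf16 - write buffer to the NAND controller (16-bit bus). */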
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
	status = readl(info->reg.gpmc_status) &
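/* omap_read_buf_pref - read data into buffer using the prefetch engine in polled (MPU) mode. */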
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
			omap_read_buf16(mtd, buf, len % 4);
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
	ret = omap_prefetch_enable(info->gpmc_cs,
			omap_read_buf16(mtd, (u_char *)p, len);
			omap_read_buf8(mtd, (u_char *)p, len);
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = r_count >> 2;
		omap_prefetch_reset(info->gpmc_cs, info);
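/* omap_write_buf_pref - write buffer using the prefetch (write-posting) engine in polled mode. */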
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
	unsigned long tim, limit;
	p = (u16 *)(buf + 1);
	ret = omap_prefetch_enable(info->gpmc_cs,
		omap_write_buf16(mtd, (u_char *)p, len);
		omap_write_buf8(mtd, (u_char *)p, len);
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
		val = readl(info->reg.gpmc_prefetch_status);
	} while (val && (tim++ < limit));
	omap_prefetch_reset(info->gpmc_cs, info);
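/*
 * omap_nand_dma_callback / omap_nand_dma_transfer - prefetch transfer driven
 * by the dmaengine API; falls back to CPU copies when the buffer cannot be
 * DMA-mapped.
 */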
static void omap_nand_dma_callback(void *data)

static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
	unsigned long tim, limit;
	if (addr >= high_memory) {
			((size_t)(addr + len - 1) & PAGE_MASK))
			"Couldn't DMA map a %d byte buffer\n", len);
	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
	tx->callback = omap_nand_dma_callback;
	dmaengine_submit(tx);
	ret = omap_prefetch_enable(info->gpmc_cs,
	init_completion(&info->comp);
	dma_async_issue_pending(info->dma);
		val = readl(info->reg.gpmc_prefetch_status);
	} while (val && (tim++ < limit));
	omap_prefetch_reset(info->gpmc_cs, info);
	is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
		: omap_write_buf16(mtd, (u_char *) addr, len);
	is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
		: omap_write_buf8(mtd, (u_char *) addr, len);
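/* omap_read_buf_dma_pref - use DMA for large reads, polled prefetch for OOB-sized ones. */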
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
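/* omap_write_buf_dma_pref - DMA counterpart for writes. */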
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
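/*
 * Fragments of the prefetch FIFO interrupt handler (omap_nand_irq): data is
 * moved between the FIFO and info->buf in 32-bit words.
 */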
	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = bytes & 0xFFFC;
				(u32 *)info->buf, bytes >> 2);
				(u32 *)info->buf, bytes >> 2);
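/* omap_read_buf_irq_pref - interrupt-driven prefetch read. */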
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
	init_completion(&info->comp);
	ret = omap_prefetch_enable(info->gpmc_cs,
	omap_prefetch_reset(info->gpmc_cs, info);
		omap_read_buf16(mtd, buf, len);
		omap_read_buf8(mtd, buf, len);
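/* omap_write_buf_irq_pref - interrupt-driven prefetch write. */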
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
	unsigned long tim, limit;
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
	init_completion(&info->comp);
	ret = omap_prefetch_enable(info->gpmc_cs,
		val = readl(info->reg.gpmc_prefetch_status);
	} while (val && (tim++ < limit));
	omap_prefetch_reset(info->gpmc_cs, info);
		omap_write_buf16(mtd, buf, len);
		omap_write_buf8(mtd, buf, len);
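/*
 * gen_true_ecc - convert the three ECC bytes read from the GPMC result
 * register into the canonical parity-bit layout used for comparison.
 */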
static void gen_true_ecc(u8 *ecc_buf)
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
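/*
 * omap_compare_ecc - compare the stored and calculated ECC; locate and fix a
 * single-bit error in the page data or report an uncorrectable one.
 */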
static int omap_compare_ecc(u8 *ecc_data1,
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];
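	/*
	 * An ecc_sum of 0 means the two ECCs match; 12 marks a correctable
	 * single-bit error whose byte/bit position is rebuilt below; other
	 * values are uncorrectable.
	 */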
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		find_byte = (ecc_bit[23] << 8) +
		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
		pr_debug("Correcting single bit ECC error at offset: "
				"%d, bit: %d\n", find_byte, find_bit);
		page_data[find_byte] ^= (1 << find_bit);
		if (ecc_data2[0] == 0 &&
		pr_debug("UNCORRECTED_ERROR default\n");
/*
 * omap_correct_data - correct_data hook: compare stored and calculated ECC
 * for each 512-byte block and fix single-bit errors.
 */
	int blockCnt = 0, i = 0, ret = 0;
			(info->nand.ecc.size == 2048))
	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
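/*
 * omap_calculate_ecc - read the GPMC ECC1 result register and pack it into
 * three (non-inverted) ECC bytes.
 */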
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
	val = readl(info->reg.gpmc_ecc_config);
	val = readl(info->reg.gpmc_ecc1_result);
	*ecc_code++ = val >> 16;
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
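/* omap_enable_hwecc - configure and enable the GPMC hardware ECC engine for the given mode. */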
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
	writel(val, info->reg.gpmc_ecc_size_config);
			"error: unrecognized Mode[%d]!\n", mode);
	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
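/*
 * Fragment of omap_wait (the waitfunc hook): erase operations get a 400 ms
 * timeout, other operations 20 ms, while the status byte is polled.
 */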
		timeo += (HZ * 400) / 1000;
		timeo += (HZ * 20) / 1000;
		status = readb(info->reg.gpmc_nand_data);
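/* omap_dev_ready - check the wait-pin status bit in GPMC_STATUS to see whether the device is ready. */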
static int omap_dev_ready(struct mtd_info *mtd)
	unsigned int val = 0;
	val = readl(info->reg.gpmc_status);
	if ((val & 0x100) == 0x100) {
#ifdef CONFIG_MTD_NAND_OMAP_BCH
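/*
 * omap3_enable_hwecc_bch - enable the GPMC BCH ECC engine: 8-bit correction
 * when ecc.bytes == 13, otherwise 4-bit, one 512-byte sector at a time.
 */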
static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
	unsigned int dev_width;
	nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
	(void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
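/* omap3_calculate_ecc_bch4/bch8 - read back the BCH syndrome computed by the GPMC. */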
static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
	return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);

static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
	return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
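/*
 * omap3_correct_data_bch - decode the syndrome with the software BCH library
 * and flip the reported error bits in the data area.
 */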
static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
	unsigned int errloc[8];
	for (i = 0; i < count; i++) {
		if (errloc[i] < 8*512)
			data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
		pr_debug("corrected bitflip %u\n", errloc[i]);
	} else if (count < 0) {
		pr_err("ecc unrecoverable error\n");
static void omap3_free_bch(struct mtd_info *mtd)
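/*
 * omap3_init_bch - allocate the software BCH decoder over GF(2^13) and hook
 * up the 4- or 8-bit BCH ECC callbacks.
 */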
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
	int ret, max_errors;
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
	const int hw_errors = 8;
	const int hw_errors = 4;
	if (max_errors != hw_errors) {
		pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
			max_errors, hw_errors);
	ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
	info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
	info->nand.ecc.size = 512;
	info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
	info->nand.ecc.correct = omap3_correct_data_bch;
	if (max_errors == 8) {
		info->nand.ecc.strength = 8;
		info->nand.ecc.bytes = 13;
		info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
		info->nand.ecc.strength = 4;
		info->nand.ecc.bytes = 7;
		info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
	pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
	omap3_free_bch(mtd);
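/*
 * omap3_init_bch_tail - build the BCH OOB layout once the chip geometry is
 * known; only large-page devices are supported.
 */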
static int omap3_init_bch_tail(struct mtd_info *mtd)
	pr_err("BCH ecc is not supported on small page devices\n");
	pr_err("no oob layout available for oobsize %d eccbytes %u\n",
	for (i = 0; i < layout->eccbytes; i++)
	layout->oobfree[0].offset = 2;
	info->nand.ecc.layout = layout;
	info->nand.badblock_pattern = &bb_descrip_flashbased;
	omap3_free_bch(mtd);
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");

static int omap3_init_bch_tail(struct mtd_info *mtd)

static void omap3_free_bch(struct mtd_info *mtd)
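/*
 * omap_nand_probe fragments: fetch platform data, map the GPMC chip-select
 * window, pick a transfer method from pdata->xfer_type and an ECC scheme
 * from pdata->ecc_opt, then register the chip.
 */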
	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
	platform_set_drvdata(pdev, info);
	info->mtd.name = dev_name(&pdev->dev);
		dev_err(&pdev->dev, "error getting memory resource\n");
	info->mem_size = resource_size(res);
				pdev->dev.driver->name)) {
	if (!info->nand.IO_ADDR_R) {
		goto out_release_mem_region;
	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl = omap_hwcontrol;
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
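	/*
	 * pdata->xfer_type selects polled, prefetch-polled, prefetch-DMA or
	 * prefetch-IRQ transfers.
	 */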
		info->nand.read_buf = omap_read_buf_pref;
		info->nand.write_buf = omap_write_buf_pref;
			info->nand.read_buf = omap_read_buf16;
			info->nand.write_buf = omap_write_buf16;
			info->nand.read_buf = omap_read_buf8;
			info->nand.write_buf = omap_write_buf8;
			dev_err(&pdev->dev, "DMA engine request failed\n");
			goto out_release_mem_region;
		cfg.src_maxburst = 16;
		cfg.dst_maxburst = 16;
		err = dmaengine_slave_config(info->dma, &cfg);
			dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
			goto out_release_mem_region;
		info->nand.read_buf = omap_read_buf_dma_pref;
		info->nand.write_buf = omap_write_buf_dma_pref;
			dev_err(&pdev->dev, "error getting fifo irq\n");
			goto out_release_mem_region;
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
			goto out_release_mem_region;
			dev_err(&pdev->dev, "error getting count irq\n");
			goto out_release_mem_region;
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
			goto out_release_mem_region;
		info->nand.read_buf = omap_read_buf_irq_pref;
		info->nand.write_buf = omap_write_buf_irq_pref;
			"xfer_type(%d) not supported!\n", pdata->xfer_type);
		goto out_release_mem_region;
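	/*
	 * Default ECC: 1-bit hardware Hamming over 512-byte blocks; 4- or
	 * 8-bit BCH is configured through omap3_init_bch() when requested.
	 */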
		info->nand.ecc.bytes = 3;
		info->nand.ecc.size = 512;
		info->nand.ecc.strength = 1;
		info->nand.ecc.calculate = omap_calculate_ecc;
		info->nand.ecc.hwctl = omap_enable_hwecc;
		info->nand.ecc.correct = omap_correct_data;
		err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
			goto out_release_mem_region;
		goto out_release_mem_region;
			info->nand.badblock_pattern = &bb_descrip_flashbased;
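		/*
		 * Hardware Hamming layout: three ECC bytes per 512-byte
		 * sector, placed right after the bad-block marker in OOB.
		 */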
		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
		for (i = 0; i < omap_oobinfo.eccbytes; i++)
			omap_oobinfo.eccpos[i] = i+offset;
		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
		omap_oobinfo.oobfree->length = info->mtd.oobsize -
					(offset + omap_oobinfo.eccbytes);
		info->nand.ecc.layout = &omap_oobinfo;
		err = omap3_init_bch_tail(&info->mtd);
			goto out_release_mem_region;
		goto out_release_mem_region;
	platform_set_drvdata(pdev, &info->mtd);
out_release_mem_region:
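/*
 * omap_nand_remove - tear down in reverse order of probe: free BCH state,
 * release the NAND device and the mapped region, and clear the driver data.
 */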
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	omap3_free_bch(&info->mtd);
	platform_set_drvdata(pdev, NULL);
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,