#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);
static void cfi_amdstd_destroy(struct mtd_info *);
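/* Fields of the cfi_amdstd_chipdrv chip-driver structure registered with the map layer. */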
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
#ifdef DEBUG_CFI_FEATURES
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};
	printk(" Address sensitive unlock: %s\n",
	printk(" Block protection: Not supported\n");
	printk(" Block protection: %d sectors per group\n", extp->BlkProt);
	printk(" Temporary block unprotect: %s\n",
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	printk(" Page mode: Not supported\n");
	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
#ifdef AMD_BOOTLOC_BUG
static void fixup_amd_bootblock(struct mtd_info *mtd)
	if (((major << 8) | minor) < 0x3131) {
		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",

		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				 " detected\n", map->name);

		if (cfi->id & 0x80) {

	pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
		 " deduced %s from Device ID\n", map->name, major, minor,
		 extp->TopBottom == 2 ? "bottom" : "top");
static void fixup_use_write_buffers(struct mtd_info *mtd)
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
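/* Atmel chips use a different PRI layout; rewrite it into the standard AMD
 * form and clear the buffer-write timeouts so buffered writes are never used. */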
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)

	if (atmel_pri.BottomBoot)

	if (atmel_pri.BottomBoot)

	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
static void fixup_use_secsi(struct mtd_info *mtd)

static void fixup_use_erase_chip(struct mtd_info *mtd)
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
static void fixup_use_atmel_lock(struct mtd_info *mtd)
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
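/* Some older SST parts report their erase geometry incorrectly; treat the
 * device as a single uniform erase region. */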
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
	cfi->cfiq->NumEraseRegions = 1;

static void fixup_sst39vf(struct mtd_info *mtd)
	fixup_old_sst_eraseregion(mtd);

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
	fixup_old_sst_eraseregion(mtd);

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
	fixup_sst39vf_rev_b(mtd);

	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		   mtd->name);
static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			   mtd->name);

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			   mtd->name);

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		   mtd->name);
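/* Fixup tables: { manufacturer ID, device ID, handler } entries matched at probe time. */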
static struct cfi_fixup cfi_nopri_fixup_table[] = {

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize },
#if !FORCE_WORD_WRITE

static struct cfi_fixup jedec_fixup_table[] = {

static struct cfi_fixup fixup_table[] = {
static void cfi_fixup_major_minor(struct cfi_private *cfi,
		       " Extended Query version to 1.%c\n",
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,

static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
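/* Default MTD operations for cfi_cmdset_0002(); the fixups above may override
 * some of these (e.g. buffered writes, chip erase, Atmel locking). */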
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_resume = cfi_amdstd_resume;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
	unsigned char bootloc;

	cfi_fixup_major_minor(cfi, extp);

		       "version %c.%c (%#02x/%#02x).\n",

#ifdef DEBUG_CFI_FEATURES
		cfi_tell_features(extp);

		if ((bootloc < 2) || (bootloc > 5)) {
			       "bank location (%d). Assuming bottom.\n",
		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
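/* Per-chip timings: CFI stores the typical timeouts as powers of two, so
 * 1 << value gives the typical time in the unit the CFI spec defines. */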
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
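/* cfi_amdstd_setup(): size the device and translate the CFI erase-region
 * descriptors into the MTD eraseregions table. */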
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		offset += (ersize * ernum);
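/* Ready/valid checks: the chip is "ready" when two successive reads return the
 * same value (the toggle bits stop toggling), and "good" when that stable value
 * also matches the expected data. */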
	return map_word_equal(map, d, t);

	return map_word_equal(map, oldd, curd) &&
	       map_word_equal(map, curd, expected);
	timeo = jiffies + HZ;

	switch (chip->state) {
		if (chip_ready(map, adr))

		if (chip_ready(map, adr))

			put_chip(map, chip, adr);
			printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
		cfi_fixup_m29ew_erase_suspend(map,
		cfi_fixup_m29ew_delay_after_resume(cfi);
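/* XIP support: when the kernel runs from this flash, delays must poll in place
 * instead of sleeping; the non-XIP variants below simply drop the chip mutex
 * around the wait. */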
#ifdef CONFIG_MTD_XIP
				unsigned long adr, int usec)

	oldstate = chip->state;
	if (!map_word_bitsset(map, status, CMD(0x40)))

			cfi_fixup_m29ew_erase_suspend(map, adr);
			chip->state = oldstate;
		} else if (usec >= 1000000/HZ) {
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)
#else
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

/* Without XIP we can simply drop the chip mutex around the delay. */
#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)
static inline int do_read_onechip(struct map_info *map, struct flchip *chip,
				  loff_t adr, size_t len, u_char *buf)
	unsigned long cmd_addr;

	cmd_addr = adr & ~(map_bankwidth(map)-1);

	put_chip(map, chip, cmd_addr);
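/* cfi_amdstd_read(): split the request at chip boundaries and let
 * do_read_onechip() claim each chip, copy the data out and release it. */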
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len,
			    size_t *retlen, u_char *buf)
	ofs = from - (chipnum << cfi->chipshift);

		unsigned long thislen;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
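/* Secure Silicon sector ("SecSi") reads, used for the OTP hooks installed by
 * fixup_use_secsi(). */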
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip,
					loff_t adr, size_t len, u_char *buf)

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
				  size_t *retlen, u_char *buf)
		unsigned long thislen;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
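/* do_write_oneword(): program a single bus word and poll until the data reads
 * back correctly or the timeout expires, retrying up to MAX_WORD_RETRIES times. */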
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	if (map_word_equal(map, oldd, datum)) {

				adr, map_bankwidth(map),

	timeo = jiffies + uWriteTimeout;

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {

		if (chip_ready(map, adr))

		UDELAY(map, chip, adr, 1);

	if (!chip_good(map, adr, datum)) {

	put_chip(map, chip, adr);
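/* cfi_amdstd_write_words(): read-modify-write any unaligned head and tail, and
 * program the aligned middle one bus word at a time. */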
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
	unsigned long ofs, chipstart;

	chipstart = cfi->chips[chipnum].start;

	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;

		tmp_buf = map_read(map, bus_ofs+chipstart);

		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],

	while (len >= map_bankwidth(map)) {
		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

			chipstart = cfi->chips[chipnum].start;

	if (len & (map_bankwidth(map)-1)) {

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
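/* do_write_buffer(): load up to one write buffer's worth of data into the chip,
 * issue a single buffer-to-flash program operation and poll for completion. */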
			   unsigned long adr, const u_char *buf,
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	unsigned long cmd_adr;

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	words = len / map_bankwidth(map);

	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	z -= map_bankwidth(map);

				   adr, map_bankwidth(map),

	timeo = jiffies + uWriteTimeout;

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))

		if (chip_ready(map, adr)) {

		UDELAY(map, chip, adr, 1);

	put_chip(map, chip, adr);
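/* cfi_amdstd_write_buffers(): use word writes for unaligned prefixes and for
 * the trailing "dregs", buffer writes for the aligned bulk of the data. */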
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);

	while (len >= map_bankwidth(map) * 2) {
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size % map_bankwidth(map))

		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
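/* Panic write path (e.g. mtdoops): no locks may be taken and no sleeping is
 * allowed, so readiness is busy-waited with cfi_amdstd_panic_wait(). */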
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,

	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		if (chip_ready(map, adr))
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;

	ret = cfi_amdstd_panic_wait(map, chip, adr);

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);

		if (chip_ready(map, adr))

	if (!chip_good(map, adr, datum)) {
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
	unsigned long ofs, chipstart;

	chipstart = cfi->chips[chipnum].start;

	if (ofs & (map_bankwidth(map) - 1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
		int i = ofs - bus_ofs;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);

		tmp_buf = map_read(map, bus_ofs + chipstart);

		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],

	while (len >= map_bankwidth(map)) {
		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

			chipstart = cfi->chips[chipnum].start;

	if (len & (map_bankwidth(map) - 1)) {

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
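/* Chip and sector erase: start the erase, wait in timer-tick steps and then
 * verify the erased range reads back as all 0xFF. */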
	unsigned long int adr;

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, chip->start);

		if (chip_ready(map, adr))

		UDELAY(map, chip, adr, 1000000/HZ);

	if (!chip_good(map, adr, map_word_ff(map))) {

	put_chip(map, chip, adr);

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",

		if (chip_ready(map, adr)) {

		UDELAY(map, chip, adr, 1000000/HZ);

	if (!chip_good(map, adr, map_word_ff(map))) {

	put_chip(map, chip, adr);
	unsigned long ofs, len;

	if (instr->addr != 0)

	ret = do_erase_chip(map, &cfi->chips[0]);
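/* Atmel sector lock/unlock handlers, installed by fixup_use_atmel_lock(). */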
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	put_chip(map, chip, adr + chip->start);
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	put_chip(map, chip, adr + chip->start);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
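/* Shutdown and power management: quiesce every chip for sync/suspend and reset
 * them on reboot or driver teardown. */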
static void cfi_amdstd_sync (struct mtd_info *mtd)
	for (i=0; !ret && i<cfi->numchips; i++) {

		switch (chip->state) {

	for (i--; i >= 0; i--) {
static int cfi_amdstd_suspend(struct mtd_info *mtd)
	for (i=0; !ret && i<cfi->numchips; i++) {

		switch (chip->state) {

	for (i--; i >= 0; i--) {

static void cfi_amdstd_resume(struct mtd_info *mtd)
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
static int cfi_amdstd_reset(struct mtd_info *mtd)
	for (i = 0; i < cfi->numchips; i++) {

			put_chip(map, chip, chip->start);

	cfi_amdstd_reset(mtd);

static void cfi_amdstd_destroy(struct mtd_info *mtd)
	cfi_amdstd_reset(mtd);