20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
/*
 * Compile-time switch: when set non-zero, buffered programming is never
 * used and all writes go through the word-write path.
 */
20 #include <linux/module.h>
43 #define FORCE_WORD_WRITE 0
/*
 * Flash device IDs matched by the fixup tables later in this file.
 * NOTE(review): by their names these presumably identify Intel 82802AB/AC
 * firmware hubs, a Numonyx/Intel PF38F4476, ST M50LPW080/M50FLW080A/B and
 * Atmel AT49BV640D(T) parts — confirm against the respective datasheets;
 * only the numeric IDs are visible here.
 */
46 #define I82802AB 0x00ad
47 #define I82802AC 0x00ac
48 #define PF38F4476 0x881c
50 #define M50LPW080 0x002F
51 #define M50FLW080A 0x0080
52 #define M50FLW080B 0x0081
54 #define AT49BV640D 0x02de
55 #define AT49BV640DT 0x02db
/*
 * Forward declarations for the mtd_info operation callbacks implemented
 * later in this file.  Each takes the mtd_info describing the chip set;
 * read/write variants take an offset, length, a *retlen out-parameter for
 * the number of bytes actually transferred, and a data buffer.
 */
/* Read @len bytes at @from into the caller's buffer. */
57 static int cfi_intelext_read (
struct mtd_info *, loff_t,
size_t,
size_t *,
u_char *);
/* Program data one machine word at a time. */
58 static int cfi_intelext_write_words(
struct mtd_info *, loff_t,
size_t,
size_t *,
const u_char *);
/* Program data using the chip's write buffer. */
59 static int cfi_intelext_write_buffers(
struct mtd_info *, loff_t,
size_t,
size_t *,
const u_char *);
/* Scatter/gather write: program an array of kvec segments. */
60 static int cfi_intelext_writev(
struct mtd_info *,
const struct kvec *,
unsigned long, loff_t,
size_t *);
/* Wait for all chips to become idle. */
62 static void cfi_intelext_sync (
struct mtd_info *);
/* Set / clear the hardware block-lock bits over [ofs, ofs+len). */
63 static int cfi_intelext_lock(
struct mtd_info *mtd, loff_t ofs,
uint64_t len);
64 static int cfi_intelext_unlock(
struct mtd_info *mtd, loff_t ofs,
uint64_t len);
/*
 * Query lock state of a range.
 * NOTE(review): this prototype is truncated in this extract — the trailing
 * length parameter (presumably "uint64_t len);") is not visible here.
 */
65 static int cfi_intelext_is_locked(
struct mtd_info *mtd, loff_t ofs,
/*
 * Forward declarations for the OTP (one-time-programmable / protection
 * register) accessors and the power-management / teardown hooks.
 */
/* Read from the factory-programmed protection registers. */
68 static int cfi_intelext_read_fact_prot_reg (
struct mtd_info *, loff_t,
size_t,
size_t *,
u_char *);
/* Read from the user-programmable protection registers. */
69 static int cfi_intelext_read_user_prot_reg (
struct mtd_info *, loff_t,
size_t,
size_t *,
u_char *);
/* Program the user protection registers. */
70 static int cfi_intelext_write_user_prot_reg (
struct mtd_info *, loff_t,
size_t,
size_t *,
u_char *);
/* Permanently lock a range of the user protection registers. */
71 static int cfi_intelext_lock_user_prot_reg (
struct mtd_info *, loff_t,
size_t);
/*
 * Enumerate factory/user OTP regions.
 * NOTE(review): both prototypes below are truncated in this extract — the
 * remaining parameters are not visible here; confirm against the full file.
 */
72 static int cfi_intelext_get_fact_prot_info (
struct mtd_info *,
74 static int cfi_intelext_get_user_prot_info (
struct mtd_info *,
/* Power management: quiesce chips for suspend, re-init on resume. */
77 static int cfi_intelext_suspend (
struct mtd_info *);
78 static void cfi_intelext_resume (
struct mtd_info *);
/* Reboot notifier: puts the flash back into a sane (read) state. */
79 static int cfi_intelext_reboot (
struct notifier_block *,
unsigned long,
void *);
/* Free everything allocated for this chip driver instance. */
81 static void cfi_intelext_destroy(
struct mtd_info *);
/*
 * Forward declarations for the XIP-style point/unpoint interface, which
 * maps a flash range for direct access instead of copying it.
 * NOTE(review): the cfi_intelext_point prototype is truncated in this
 * extract — its trailing out-parameters (retlen/virt/phys in the upstream
 * driver, presumably) are not visible here; confirm against the full file.
 */
88 static int cfi_intelext_point (
struct mtd_info *mtd, loff_t
from,
size_t len,
/* Release a mapping previously established by cfi_intelext_point(). */
90 static int cfi_intelext_unpoint(
struct mtd_info *mtd, loff_t
from,
size_t len);
105 .destroy = cfi_intelext_destroy,
106 .name =
"cfi_cmdset_0001",
113 #ifdef DEBUG_CFI_FEATURES
130 for (i=11; i<32; i++) {
132 printk(
" - Unknown Bit %X: supported\n", i);
137 for (i=1; i<8; i++) {
139 printk(
" - Unknown Bit %X: supported\n", i);
145 for (i=2; i<3; i++) {
147 printk(
" - Unknown Bit %X Active: yes\n",i);
151 for (i=6; i<16; i++) {
153 printk(
" - Unknown Bit %X Active: yes\n",i);
156 printk(
" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
159 printk(
" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
165 static void fixup_convert_atmel_pri(
struct mtd_info *mtd)
178 memcpy(&atmel_pri, extp,
sizeof(atmel_pri));
179 memset((
char *)extp + 5, 0,
sizeof(*extp) - 5);
183 if (atmel_pri.Features & 0x01)
185 if (atmel_pri.Features & 0x02)
187 if (atmel_pri.Features & 0x04)
189 if (atmel_pri.Features & 0x08)
191 if (atmel_pri.Features & 0x20)
193 if (atmel_pri.Features & 0x40)
195 if (atmel_pri.Features & 0x80)
201 cfi->
cfiq->BufWriteTimeoutTyp = 0;
202 cfi->
cfiq->BufWriteTimeoutMax = 0;
205 static void fixup_at49bv640dx_lock(
struct mtd_info *mtd)
215 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
217 static void fixup_intel_strataflash(
struct mtd_info *mtd)
224 "erase on write disabled.\n");
229 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
230 static void fixup_no_write_suspend(
struct mtd_info *mtd)
243 static void fixup_st_m28w320ct(
struct mtd_info *mtd)
248 cfi->
cfiq->BufWriteTimeoutTyp = 0;
249 cfi->
cfiq->BufWriteTimeoutMax = 0;
252 static void fixup_st_m28w320cb(
struct mtd_info *mtd)
258 cfi->
cfiq->EraseRegionInfo[1] =
259 (cfi->
cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
262 static void fixup_use_point(
struct mtd_info *mtd)
266 mtd->
_point = cfi_intelext_point;
267 mtd->
_unpoint = cfi_intelext_unpoint;
271 static void fixup_use_write_buffers(
struct mtd_info *mtd)
275 if (cfi->
cfiq->BufWriteTimeoutTyp) {
277 mtd->
_write = cfi_intelext_write_buffers;
278 mtd->
_writev = cfi_intelext_writev;
285 static void fixup_unlock_powerup_lock(
struct mtd_info *mtd)
297 static struct cfi_fixup cfi_fixup_table[] = {
301 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
304 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
307 #if !FORCE_WORD_WRITE
316 static struct cfi_fixup jedec_fixup_table[] = {
324 static struct cfi_fixup fixup_table[] = {
334 static void cfi_fixup_major_minor(
struct cfi_private *cfi,
348 unsigned int extp_size =
sizeof(*extp);
355 cfi_fixup_major_minor(cfi, extp);
384 extra_size += extp->
extra[extra_size - 1];
394 nb_parts = extp->
extra[extra_size - 1];
400 for (i = 0; i < nb_parts; i++) {
403 extra_size +=
sizeof(*rinfo);
418 if (extp_size > 4096) {
420 "%s: cfi_pri_intelext is too fat\n",
446 mtd->
_erase = cfi_intelext_erase_varsize;
447 mtd->
_read = cfi_intelext_read;
448 mtd->
_write = cfi_intelext_write_words;
449 mtd->
_sync = cfi_intelext_sync;
450 mtd->
_lock = cfi_intelext_lock;
451 mtd->
_unlock = cfi_intelext_unlock;
453 mtd->
_suspend = cfi_intelext_suspend;
454 mtd->
_resume = cfi_intelext_resume;
471 extp = read_pri_intelext(map, adr);
482 #ifdef DEBUG_CFI_FEATURES
484 cfi_tell_features(extp);
499 if (cfi->
cfiq->WordWriteTimeoutTyp)
500 cfi->
chips[
i].word_write_time =
501 1<<cfi->
cfiq->WordWriteTimeoutTyp;
503 cfi->
chips[
i].word_write_time = 50000;
505 if (cfi->
cfiq->BufWriteTimeoutTyp)
506 cfi->
chips[
i].buffer_write_time =
507 1<<cfi->
cfiq->BufWriteTimeoutTyp;
510 if (cfi->
cfiq->BlockEraseTimeoutTyp)
511 cfi->
chips[
i].erase_time =
512 1000<<cfi->
cfiq->BlockEraseTimeoutTyp;
514 cfi->
chips[
i].erase_time = 2000000;
516 if (cfi->
cfiq->WordWriteTimeoutTyp &&
517 cfi->
cfiq->WordWriteTimeoutMax)
518 cfi->
chips[
i].word_write_time_max =
519 1<<(cfi->
cfiq->WordWriteTimeoutTyp +
520 cfi->
cfiq->WordWriteTimeoutMax);
522 cfi->
chips[
i].word_write_time_max = 50000 * 8;
524 if (cfi->
cfiq->BufWriteTimeoutTyp &&
525 cfi->
cfiq->BufWriteTimeoutMax)
526 cfi->
chips[
i].buffer_write_time_max =
527 1<<(cfi->
cfiq->BufWriteTimeoutTyp +
528 cfi->
cfiq->BufWriteTimeoutMax);
530 if (cfi->
cfiq->BlockEraseTimeoutTyp &&
531 cfi->
cfiq->BlockEraseTimeoutMax)
532 cfi->
chips[
i].erase_time_max =
533 1000<<(cfi->
cfiq->BlockEraseTimeoutTyp +
534 cfi->
cfiq->BlockEraseTimeoutMax);
536 cfi->
chips[
i].erase_time_max = 2000000 * 8;
538 cfi->
chips[
i].ref_point_counter = 0;
542 map->
fldrv = &cfi_intelext_chipdrv;
544 return cfi_intelext_setup(mtd);
558 unsigned long devsize = (1<<cfi->
cfiq->DevSize) * cfi->
interleave;
568 printk(
KERN_ERR "Failed to allocate memory for MTD erase region info\n");
572 for (i=0; i<cfi->
cfiq->NumEraseRegions; i++) {
573 unsigned long ernum, ersize;
574 ersize = ((cfi->
cfiq->EraseRegionInfo[
i] >> 8) & ~0xff) * cfi->
interleave;
575 ernum = (cfi->
cfiq->EraseRegionInfo[
i] & 0xffff) + 1;
586 offset += (ersize * ernum);
589 if (offset != devsize) {
591 printk(
KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
602 #ifdef CONFIG_MTD_OTP
613 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
627 static int cfi_intelext_partition_fixup(
struct mtd_info *mtd,
650 int offs, numregions, numparts, partshift, numvirtchips,
i,
j;
657 offs += extp->
extra[offs+1]+2;
669 for (i = 0; i < numregions; i++) {
673 offs +=
sizeof(*rinfo)
687 printk(
KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
702 "%s: bad number of hw partitions (%d)\n",
707 numvirtchips = cfi->
numchips * numparts;
720 chip = &newcfi->
chips[0];
721 for (i = 0; i < cfi->
numchips; i++) {
724 for (j = 0; j < numparts; j++) {
726 chip->
start += j << partshift;
727 chip->
priv = &shared[
i];
737 "--> %d partitions of %d KiB\n",
752 static int chip_ready (
struct map_info *map,
struct flchip *chip,
unsigned long adr,
int mode)
764 switch (chip->
state) {
817 put_chip(map, chip, adr);
819 "suspended: status = 0x%lx\n", map->
name, status.
x[0]);
891 if (contender && contender != chip) {
906 ret = chip_ready(map, contender, contender->
start, mode);
922 put_chip(map, contender, contender->
start);
949 ret = chip_ready(map, chip, adr, mode);
956 static void put_chip(
struct map_info *map,
struct flchip *chip,
unsigned long adr)
972 put_chip(map, loaner, loaner->
start);
1027 #ifdef CONFIG_MTD_XIP
1074 static int __xipram xip_wait_for_operation(
1076 unsigned long adr,
unsigned int chip_op_time_max)
1085 usec = chip_op_time_max;
1124 oldstate = chip->
state;
1126 if (!map_word_bitsset(map, status,
CMD(0x40)))
1131 if (!map_word_bitsset(map, status,
CMD(0x04)))
1136 chip->
state = newstate;
1152 while (chip->
state != newstate) {
1167 chip->
state = oldstate;
1169 }
else if (usec >= 1000000/HZ) {
1182 return (done >= usec) ? -
ETIME : 0;
1192 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1193 INVALIDATE_CACHED_RANGE(map, from, size)
1195 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1196 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1200 #define xip_disable(map, chip, adr)
1201 #define xip_enable(map, chip, adr)
1202 #define XIP_INVAL_CACHED_RANGE(x...)
1203 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1205 static int inval_cache_and_wait_for_operation(
1207 unsigned long cmd_adr,
unsigned long inval_adr,
int inval_len,
1208 unsigned int chip_op_time,
unsigned int chip_op_time_max)
1212 int chip_state = chip->
state;
1213 unsigned int timeo, sleep_time, reset_timeo;
1220 timeo = chip_op_time_max;
1223 reset_timeo = timeo;
1224 sleep_time = chip_op_time / 2;
1227 if (chip->
state != chip_state) {
1245 timeo = reset_timeo;
1250 timeo = reset_timeo;
1261 if (sleep_time >= 1000000/HZ) {
1268 timeo -= sleep_time;
1269 sleep_time = 1000000/
HZ;
1285 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1286 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1289 static int do_point_onechip (
struct map_info *map,
struct flchip *chip, loff_t adr,
size_t len)
1291 unsigned long cmd_addr;
1298 cmd_addr = adr & ~(map_bankwidth(map)-1);
1316 static int cfi_intelext_point(
struct mtd_info *mtd, loff_t
from,
size_t len,
1321 unsigned long ofs, last_end = 0;
1332 ofs = from - (chipnum << cfi->
chipshift);
1334 *virt = map->
virt + cfi->
chips[chipnum].start + ofs;
1336 *phys = map->
phys + cfi->
chips[chipnum].start + ofs;
1339 unsigned long thislen;
1346 last_end = cfi->
chips[chipnum].start;
1347 else if (cfi->
chips[chipnum].start != last_end)
1355 ret = do_point_onechip(map, &cfi->
chips[chipnum], ofs, thislen);
1369 static int cfi_intelext_unpoint(
struct mtd_info *mtd, loff_t from,
size_t len)
1374 int chipnum,
err = 0;
1380 ofs = from - (chipnum << cfi->
chipshift);
1382 while (len && !err) {
1383 unsigned long thislen;
1386 chip = &cfi->
chips[chipnum];
1405 put_chip(map, chip, chip->
start);
1416 static inline int do_read_onechip(
struct map_info *map,
struct flchip *chip, loff_t adr,
size_t len,
u_char *
buf)
1418 unsigned long cmd_addr;
1425 cmd_addr = adr & ~(map_bankwidth(map)-1);
1442 put_chip(map, chip, cmd_addr);
1448 static int cfi_intelext_read (
struct mtd_info *mtd, loff_t from,
size_t len,
size_t *retlen,
u_char *buf)
1458 ofs = from - (chipnum << cfi->
chipshift);
1461 unsigned long thislen;
1471 ret = do_read_onechip(map, &cfi->
chips[chipnum], ofs, thislen, buf);
1486 unsigned long adr,
map_word datum,
int mode)
1499 write_cmd =
CMD(0xc0);
1506 ret =
get_chip(map, chip, adr, mode);
1520 adr, map_bankwidth(map),
1531 if (map_word_bitsset(map, status,
CMD(0x1a))) {
1539 if (chipstatus & 0x02) {
1541 }
else if (chipstatus & 0x08) {
1554 put_chip(map, chip, adr);
1560 static int cfi_intelext_write_words (
struct mtd_info *mtd, loff_t to ,
size_t len,
size_t *retlen,
const u_char *buf)
1572 if (ofs & (map_bankwidth(map)-1)) {
1573 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1574 int gap = ofs - bus_ofs;
1578 n =
min_t(
int, len, map_bankwidth(map)-gap);
1579 datum = map_word_ff(map);
1580 datum = map_word_load_partial(map, datum, buf, gap, n);
1582 ret = do_write_oneword(map, &cfi->
chips[chipnum],
1600 while(len >= map_bankwidth(map)) {
1601 map_word datum = map_word_load(map, buf);
1603 ret = do_write_oneword(map, &cfi->
chips[chipnum],
1608 ofs += map_bankwidth(map);
1609 buf += map_bankwidth(map);
1610 (*retlen) += map_bankwidth(map);
1611 len -= map_bankwidth(map);
1621 if (len & (map_bankwidth(map)-1)) {
1624 datum = map_word_ff(map);
1625 datum = map_word_load_partial(map, datum, buf, 0, len);
1627 ret = do_write_oneword(map, &cfi->
chips[chipnum],
1640 unsigned long adr,
const struct kvec **pvec,
1641 unsigned long *pvec_seek,
int len)
1645 unsigned long cmd_adr;
1646 int ret, wbufsize, word_gap, words;
1647 const struct kvec *vec;
1648 unsigned long vec_seek;
1649 unsigned long initial_adr;
1650 int initial_len = len;
1652 wbufsize = cfi_interleave(cfi) << cfi->
cfiq->MaxBufWriteSize;
1655 cmd_adr = adr & ~(wbufsize-1);
1680 if (map_word_bitsset(map, status,
CMD(0x30))) {
1682 printk(
KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.
x[0]);
1700 printk(
KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1701 map->
name, Xstatus.
x[0], status.
x[0]);
1706 word_gap = (-adr & (map_bankwidth(map)-1));
1707 words =
DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1711 word_gap = map_bankwidth(map) - word_gap;
1713 datum = map_word_ff(map);
1721 vec_seek = *pvec_seek;
1723 int n = map_bankwidth(map) - word_gap;
1724 if (n > vec->
iov_len - vec_seek)
1729 if (!word_gap && len < map_bankwidth(map))
1730 datum = map_word_ff(map);
1732 datum = map_word_load_partial(map, datum,
1738 if (!len || word_gap == map_bankwidth(map)) {
1740 adr += map_bankwidth(map);
1745 if (vec_seek == vec->
iov_len) {
1751 *pvec_seek = vec_seek;
1758 initial_adr, initial_len,
1771 if (map_word_bitsset(map, status,
CMD(0x1a))) {
1779 if (chipstatus & 0x02) {
1781 }
else if (chipstatus & 0x08) {
1794 put_chip(map, chip, cmd_adr);
1799 static int cfi_intelext_writev (
struct mtd_info *mtd,
const struct kvec *vecs,
1800 unsigned long count, loff_t to,
size_t *retlen)
1804 int wbufsize = cfi_interleave(cfi) << cfi->
cfiq->MaxBufWriteSize;
1807 unsigned long ofs, vec_seek,
i;
1810 for (i = 0; i <
count; i++)
1811 len += vecs[i].iov_len;
1822 int size = wbufsize - (ofs & (wbufsize-1));
1827 ofs, &vecs, &vec_seek,
size);
1851 static int cfi_intelext_write_buffers (
struct mtd_info *mtd, loff_t to,
1852 size_t len,
size_t *retlen,
const u_char *buf)
1859 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1863 unsigned long adr,
int len,
void *thunk)
1911 if (map_word_bitsset(map, status,
CMD(0x3a))) {
1919 if ((chipstatus & 0x30) == 0x30) {
1920 printk(
KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->
name, chipstatus);
1922 }
else if (chipstatus & 0x02) {
1925 }
else if (chipstatus & 0x8) {
1929 }
else if (chipstatus & 0x20 && retries--) {
1930 printk(
KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1932 put_chip(map, chip, adr);
1936 printk(
KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->
name, adr, chipstatus);
1945 put_chip(map, chip, adr);
1952 unsigned long ofs, len;
1968 static void cfi_intelext_sync (
struct mtd_info *mtd)
1976 for (i=0; !ret && i<cfi->
numchips; i++) {
1995 for (i--; i >=0; i--) {
2012 int len,
void *thunk)
2021 status = cfi_read_query(map, adr+(2*ofs_factor));
2026 #ifdef DEBUG_LOCK_BITS
2030 int len,
void *thunk)
2033 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2038 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
2039 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
2042 unsigned long adr,
int len,
void *thunk)
2083 mdelay = (!extp || !(extp->
FeatureSupport & (1 << 5))) ? 1500 : 0;
2085 ret =
WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2096 put_chip(map, chip, adr);
2101 static int cfi_intelext_lock(
struct mtd_info *mtd, loff_t ofs,
uint64_t len)
2105 #ifdef DEBUG_LOCK_BITS
2107 __func__, ofs, len);
2115 #ifdef DEBUG_LOCK_BITS
2125 static int cfi_intelext_unlock(
struct mtd_info *mtd, loff_t ofs,
uint64_t len)
2129 #ifdef DEBUG_LOCK_BITS
2131 __func__, ofs, len);
2139 #ifdef DEBUG_LOCK_BITS
2149 static int cfi_intelext_is_locked(
struct mtd_info *mtd, loff_t ofs,
2153 ofs, len,
NULL) ? 1 : 0;
2156 #ifdef CONFIG_MTD_OTP
2190 put_chip(map, chip, chip->
start);
2202 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2203 int gap = offset - bus_ofs;
2204 int n =
min_t(
int, size, map_bankwidth(map)-gap);
2207 datum = map_word_load_partial(map, datum, buf, gap, n);
2208 ret = do_write_oneword(map, chip, bus_ofs, datum,
FL_OTP_WRITE);
2231 datum = map_word_ff(map);
2232 datum = map_word_clr(map, datum,
CMD(1 << grpno));
2233 return do_write_oneword(map, chip, prot, datum,
FL_OTP_WRITE);
2236 static int cfi_intelext_otp_walk(
struct mtd_info *mtd, loff_t from,
size_t len,
2237 size_t *retlen,
u_char *buf,
2246 u_int chip_num, chip_step,
field, reg_fact_size, reg_user_size;
2247 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2268 chip_num = chip_step - 1;
2272 for ( ; chip_num < cfi->
numchips; chip_num += chip_step) {
2273 chip = &cfi->
chips[chip_num];
2279 reg_fact_groups = 1;
2281 reg_user_groups = 1;
2286 data_offset = reg_prot_offset + 1;
2293 groups = reg_user_groups;
2294 groupsize = reg_user_size;
2296 groupno = reg_fact_groups;
2297 data_offset += reg_fact_groups * reg_fact_size;
2299 groups = reg_fact_groups;
2300 groupsize = reg_fact_size;
2304 while (len > 0 && groups > 0) {
2315 ret = do_otp_read(map, chip,
2324 otpinfo->
length = groupsize;
2326 !map_word_bitsset(map, lockword,
2329 buf +=
sizeof(*otpinfo);
2330 *retlen +=
sizeof(*otpinfo);
2331 }
else if (from >= groupsize) {
2333 data_offset += groupsize;
2335 int size = groupsize;
2336 data_offset +=
from;
2341 ret =
action(map, chip, data_offset,
2342 buf, size, reg_prot_offset,
2343 groupno, groupsize);
2349 data_offset +=
size;
2370 static int cfi_intelext_read_fact_prot_reg(
struct mtd_info *mtd, loff_t from,
2371 size_t len,
size_t *retlen,
2374 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2375 buf, do_otp_read, 0);
2378 static int cfi_intelext_read_user_prot_reg(
struct mtd_info *mtd, loff_t from,
2379 size_t len,
size_t *retlen,
2382 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2383 buf, do_otp_read, 1);
2386 static int cfi_intelext_write_user_prot_reg(
struct mtd_info *mtd, loff_t from,
2387 size_t len,
size_t *retlen,
2390 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2391 buf, do_otp_write, 1);
2394 static int cfi_intelext_lock_user_prot_reg(
struct mtd_info *mtd,
2395 loff_t from,
size_t len)
2398 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2399 NULL, do_otp_lock, 1);
2402 static int cfi_intelext_get_fact_prot_info(
struct mtd_info *mtd,
2408 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (
u_char *)buf,
NULL, 0);
2409 return ret ? : retlen;
2412 static int cfi_intelext_get_user_prot_info(
struct mtd_info *mtd,
2418 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (
u_char *)buf,
NULL, 1);
2419 return ret ? : retlen;
2424 static void cfi_intelext_save_locks(
struct mtd_info *mtd)
2436 for (block = 0; block < region->
numblocks; block++){
2438 adr = region->
offset + block * len;
2441 do_getlockstatus_oneblock, adr, len,
NULL);
2450 static int cfi_intelext_suspend(
struct mtd_info *mtd)
2461 cfi_intelext_save_locks(mtd);
2463 for (i=0; !ret && i<cfi->
numchips; i++) {
2468 switch (chip->
state) {
2504 for (i--; i >=0; i--) {
2524 static void cfi_intelext_restore_locks(
struct mtd_info *mtd)
2538 adr = region->
offset + block * len;
2539 cfi_intelext_unlock(mtd, adr, len);
2544 static void cfi_intelext_resume(
struct mtd_info *mtd)
2570 cfi_intelext_restore_locks(mtd);
2573 static int cfi_intelext_reset(
struct mtd_info *mtd)
2579 for (i=0; i < cfi->
numchips; i++) {
2590 put_chip(map, chip, chip->
start);
2604 cfi_intelext_reset(mtd);
2608 static void cfi_intelext_destroy(
struct mtd_info *mtd)
2614 cfi_intelext_reset(mtd);