15 #include <linux/kernel.h>
16 #include <linux/module.h>
26 #include <linux/slab.h>
/*
 * mFlash (mg_disk) register map and protocol constants.
 */

/* Reserved area: CONFIG_MG_DISK_RES is given in KiB; <<1 converts to 512-byte sectors */
#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)

/* Block-device naming and geometry limits */
#define MG_DISK_NAME "mgd"

#define MG_DISK_MAX_PART 16
#define MG_SECTOR_SIZE 512
#define MG_MAX_SECTS 256

/* Offsets into the memory-mapped device window */
#define MG_BUFF_OFFSET 0x8000	/* sector data buffer */
#define MG_REG_OFFSET 0xC000	/* base of the task-file registers */
#define MG_REG_FEATURE (MG_REG_OFFSET + 2)	/* same offset as ERROR (write side) */
#define MG_REG_ERROR (MG_REG_OFFSET + 2)	/* same offset as FEATURE (read side) */
#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE)	/* same offset as STATUS (write side) */
#define MG_REG_STATUS (MG_REG_OFFSET + 0xE)	/* same offset as COMMAND (read side) */
#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)

/* "ready" = DRDY and DSC set while none of BUSY/DF/ERR is set */
#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
ATA_ERR))) == MG_STAT_READY)

/* Driver-internal error codes; kept above 0xFF so they cannot collide
 * with an 8-bit hardware status value */
#define MG_ERR_TIMEOUT 0x100
#define MG_ERR_INIT_STAT 0x101
#define MG_ERR_TRANSLATION 0x102
#define MG_ERR_CTRL_RST 0x103
#define MG_ERR_INV_STAT 0x104
#define MG_ERR_RSTOUT 0x105

#define MG_MAX_ERRORS 6	/* retry limit before failing a request */

/* ATA-style command opcodes accepted by the device */
#define MG_CMD_RD 0x20
#define MG_CMD_WR 0x30
#define MG_CMD_SLEEP 0x99
#define MG_CMD_WAKEUP 0xC3
#define MG_CMD_ID 0xEC
#define MG_CMD_WR_CONF 0x3C
#define MG_CMD_RD_CONF 0x40

/* Operation-mode flag bits */
#define MG_OP_CASCADE (1 << 0)
#define MG_OP_CASCADE_SYNC_RD (1 << 1)
#define MG_OP_CASCADE_SYNC_WR (1 << 2)
#define MG_OP_INTERLEAVE (1 << 3)

/* Burst-control register fields: latency in bits 4..6, length in bits 1..3 */
#define MG_BURST_LAT_4 (3 << 4)
#define MG_BURST_LAT_5 (4 << 4)
#define MG_BURST_LAT_6 (5 << 4)
#define MG_BURST_LAT_7 (6 << 4)
#define MG_BURST_LAT_8 (7 << 4)
#define MG_BURST_LEN_4 (1 << 1)
#define MG_BURST_LEN_8 (2 << 1)
#define MG_BURST_LEN_16 (3 << 1)
#define MG_BURST_LEN_32 (4 << 1)
#define MG_BURST_LEN_CONT (0 << 1)

/* Timeouts -- presumably milliseconds; TODO confirm against mg_wait() callers */
#define MG_TMAX_CONF_TO_CMD 1
#define MG_TMAX_WAIT_RD_DRQ 10
#define MG_TMAX_WAIT_WR_DRQ 500
#define MG_TMAX_RST_TO_BUSY 10
#define MG_TMAX_HDRST_TO_RDY 500
#define MG_TMAX_SWRST_TO_RDY 500
#define MG_TMAX_RSTOUT 3000

/* Mask of the (externally defined) device-type flags */
#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
141 # define MG_DBG(fmt, args...) \
142 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
144 # define MG_DBG(fmt, args...) do { } while (0)
149 static bool mg_end_request(
struct mg_host *
host,
int err,
unsigned int nr_bytes)
158 static bool mg_end_request_cur(
struct mg_host *host,
int err)
160 return mg_end_request(host, err, blk_rq_cur_bytes(host->
req));
163 static void mg_dump_status(
const char *
msg,
unsigned int stat,
169 name = host->
req->rq_disk->disk_name;
171 printk(
KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
183 printk(
"CorrectedError ");
187 if ((stat & ATA_ERR) == 0) {
196 printk(
"UncorrectableError ");
198 printk(
"SectorIdNotFound ");
200 printk(
"DriveStatusError ");
202 printk(
"AddrMarkNotFound ");
207 (
unsigned int)blk_rq_pos(host->
req));
216 unsigned long expire, cur_jiffies;
236 if (status & ATA_BUSY) {
237 if (expect == ATA_BUSY)
241 if (status & ATA_ERR) {
242 mg_dump_status(
"mg_wait", status, host);
250 if (expect == ATA_DRQ)
251 if (status & ATA_DRQ)
255 mg_dump_status(
"not ready", status, host);
268 static unsigned int mg_wait_rstout(
u32 rstout,
u32 msec)
270 unsigned long expire;
282 static void mg_unexpected_intr(
struct mg_host *host)
286 mg_dump_status(
"mg_unexpected_intr", status, host);
294 spin_lock(&host->
lock);
302 spin_unlock(&host->
lock);
308 static void mg_id_string(
const u16 *
id,
unsigned char *
s,
309 unsigned int ofs,
unsigned int len)
330 static void mg_id_c_string(
const u16 *
id,
unsigned char *s,
331 unsigned int ofs,
unsigned int len)
335 mg_id_string(
id, s, ofs, len - 1);
338 while (p > s && p[-1] ==
' ')
343 static int mg_get_disk_id(
struct mg_host *host)
347 const u16 *
id = host->
id;
361 for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
388 mg_id_c_string(
id, model,
ATA_ID_PROD,
sizeof(model));
389 mg_id_c_string(
id, serial,
ATA_ID_SERNO,
sizeof(serial));
403 static int mg_disk_init(
struct mg_host *host)
437 if (init_status == 0xf)
443 static void mg_bad_rw_intr(
struct mg_host *host)
448 mg_end_request_cur(host, -
EIO);
451 static unsigned int mg_out(
struct mg_host *host,
452 unsigned int sect_num,
453 unsigned int sect_cnt,
455 void (*intr_addr)(
struct mg_host *))
482 u16 *buff = (
u16 *)req->buffer;
485 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
490 static void mg_read(
struct request *req)
492 struct mg_host *host = req->rq_disk->private_data;
494 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
496 mg_bad_rw_intr(host);
498 MG_DBG(
"requested %d sects (from %ld), buffer=0x%p\n",
499 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
502 if (mg_wait(host, ATA_DRQ,
504 mg_bad_rw_intr(host);
508 mg_read_one(host, req);
515 static void mg_write_one(
struct mg_host *host,
struct request *req)
517 u16 *buff = (
u16 *)req->buffer;
520 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
525 static void mg_write(
struct request *req)
527 struct mg_host *host = req->rq_disk->private_data;
528 unsigned int rem = blk_rq_sectors(req);
530 if (mg_out(host, blk_rq_pos(req), rem,
532 mg_bad_rw_intr(host);
536 MG_DBG(
"requested %d sects (from %ld), buffer=0x%p\n",
537 rem, blk_rq_pos(req), req->buffer);
539 if (mg_wait(host, ATA_DRQ,
541 mg_bad_rw_intr(host);
546 mg_write_one(host, req);
552 if (rem > 1 && mg_wait(host, ATA_DRQ,
554 mg_bad_rw_intr(host);
558 mg_bad_rw_intr(host);
564 static void mg_read_intr(
struct mg_host *host)
579 mg_dump_status(
"mg_read_intr", i, host);
580 mg_bad_rw_intr(host);
581 mg_request(host->
breq);
585 mg_read_one(host, req);
587 MG_DBG(
"sector %ld, remaining=%ld, buffer=0x%p\n",
588 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
598 mg_request(host->
breq);
601 static void mg_write_intr(
struct mg_host *host)
614 if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
617 mg_dump_status(
"mg_write_intr", i, host);
618 mg_bad_rw_intr(host);
619 mg_request(host->
breq);
625 mg_write_one(host, req);
626 MG_DBG(
"sector %ld, remaining=%ld, buffer=0x%p\n",
627 blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
636 mg_request(host->
breq);
644 spin_lock_irq(&host->
lock);
651 name = host->
req->rq_disk->disk_name;
655 mg_bad_rw_intr(host);
658 mg_request(host->
breq);
659 spin_unlock_irq(&host->
lock);
664 struct mg_host *host = q->queuedata;
673 if (
unlikely(host->
req->cmd_type != REQ_TYPE_FS)) {
674 mg_end_request_cur(host, -
EIO);
678 if (rq_data_dir(host->
req) ==
READ)
685 static unsigned int mg_issue_req(
struct request *req,
687 unsigned int sect_num,
688 unsigned int sect_cnt)
690 switch (rq_data_dir(req)) {
692 if (mg_out(host, sect_num, sect_cnt,
MG_CMD_RD, &mg_read_intr)
694 mg_bad_rw_intr(host);
701 if (mg_out(host, sect_num, sect_cnt,
MG_CMD_WR, &mg_write_intr)
703 mg_bad_rw_intr(host);
710 mg_bad_rw_intr(host);
713 mg_write_one(host, req);
725 struct mg_host *host = q->queuedata;
727 u32 sect_num, sect_cnt;
743 sect_num = blk_rq_pos(req);
745 sect_cnt = blk_rq_sectors(req);
748 if (sect_num >= get_capacity(req->rq_disk) ||
749 ((sect_num + sect_cnt) >
750 get_capacity(req->rq_disk))) {
752 "%s: bad access: sector=%d, count=%d\n",
753 req->rq_disk->disk_name,
755 mg_end_request_cur(host, -
EIO);
759 if (
unlikely(req->cmd_type != REQ_TYPE_FS)) {
760 mg_end_request_cur(host, -
EIO);
764 if (!mg_issue_req(req, host, sect_num, sect_cnt))
779 static const struct block_device_operations mg_disk_ops = {
783 static int mg_suspend(
struct device *
dev)
807 static int mg_resume(
struct device *dev)
856 host->
dev = &plat_dev->
dev;
918 err = mg_disk_init(host);
921 __func__, __LINE__, err);
939 __func__, __LINE__, err);
946 err = mg_get_disk_id(host);
949 __func__, __LINE__, err);
957 __func__, __LINE__, err);
1000 host->
gd->first_minor = 0;
1001 host->
gd->fops = &mg_disk_ops;
1002 host->
gd->queue = host->
breq;
1003 host->
gd->private_data =
host;
1078 .remove = mg_remove,
1092 static int __init mg_init(
void)
1098 static void __exit mg_exit(
void)