#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/module.h>
	return ioread8(ap->ioaddr.status_addr);
}
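/*
 * Read the alternate status register.  Unlike the primary status register,
 * reading this one does not clear a pending device interrupt, so it can be
 * polled safely while a command is in flight.
 */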
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}
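/*
 * Fetch the status used for interrupt handling.  The alternate status is
 * consulted first, so a device that is still busy does not have its INTRQ
 * latch cleared by a premature read of the primary status register.
 */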
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		if (status & ATA_BUSY)
			return status;
	}
	status = ap->ops->sff_check_status(ap);
	return status;
}
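/*
 * Flush writes posted to the device by reading the alternate status
 * register; this is a no-op when neither an ->sff_check_altstatus()
 * callback nor an altstatus address is available.
 */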
static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		ata_sff_altstatus(ap);
		return;
	}
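/*
 * Sleep-wait for BSY to clear: poll quickly for up to @tmout_pat, warn
 * that the port is slow, then keep polling until @tmout expires before
 * giving up with an error.
 */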
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_warn(ap,
			      "port is slow to respond, please be patient (Status 0x%x)\n",
			      status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status & ATA_BUSY) {
		ata_port_err(ap,
			     "port failed to respond (%lu secs, Status 0x%x)\n",
			     DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}
static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}
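/*
 * Write the device control register, either through the port's
 * ->sff_set_devctl() callback or directly via the ctl I/O address.
 */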
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
	iowrite8(tmp, ap->ioaddr.device_addr);
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
		      device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
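/*
 * Taskfile register access: the device control value is only rewritten
 * when it has changed, and for 48-bit commands the high-order (HOB) fields
 * are written before the low-order ones.
 */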
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
		tf->hob_feature, tf->hob_nsect,
		tf->hob_lbal, tf->hob_lbam, tf->hob_lbah);

	VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
		tf->feature, tf->nsect,
		tf->lbal, tf->lbam, tf->lbah);
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (likely(ioaddr->ctl_addr)) {
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}
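/*
 * PIO data transfer helpers: move the buffer to or from the data register
 * in 16-bit words (or 32-bit accesses when the controller supports it),
 * bouncing any odd-sized tail through a small pad buffer.
 */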
	unsigned int words = buflen >> 1;

	unsigned char pad[2] = { };

	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	unsigned char pad[4] = { };

	buf += buflen - slop;

	return (buflen + 1) & ~1;
	unsigned int consumed;
	page = sg_page(qc->cursg);

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));

	if (PageHighMem(page)) {

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);
	if (is_multi_taskfile(&qc->tf)) {
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);

	switch (qc->tf.protocol) {
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
843 "buf=%u cur=%u bytes=%u",
852 page = nth_page(page, (offset >>
PAGE_SHIFT));
863 if (PageHighMem(page)) {
871 consumed = ap->
ops->sff_data_xfer(dev, buf + offset,
878 consumed = ap->
ops->sff_data_xfer(dev, buf + offset,
882 bytes -=
min(bytes, consumed);
	unsigned int ireason, bc_lo, bc_hi, bytes;

	bytes = (bc_hi << 8) | bc_lo;

	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
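/*
 * Host state machine (HSM) support: decide whether a transition may run
 * from the workqueue, complete a qc with or without ap->lock held, and
 * step the protocol state machine itself in ata_sff_hsm_move().
 */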
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (ata_is_atapi(qc->tf.protocol) &&
	    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			/* EH might have kicked in while the host lock
			 * was released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);

			spin_unlock_irqrestore(ap->lock, flags);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			spin_unlock_irqrestore(ap->lock, flags);
		}
	}
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	unsigned long flags = 0;

	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

			"ST_FIRST: !(DRQ|ERR|DF)");

			"DRQ=1 with device error, "
			"dev_stat 0x%X", status);

			ata_pio_sectors(qc);

			atapi_send_cdb(ap, qc);

			spin_unlock_irqrestore(ap->lock, flags);

		if ((status & ATA_DRQ) == 0) {

			"DRQ=1 with device error, "
			"dev_stat 0x%X", status);

			atapi_pio_bytes(qc);

			if (qc->dev->horkage &
			    ATA_HORKAGE_DIAGNOSTIC)

			"DRQ=0 without device error, "
			"dev_stat 0x%X", status);

			ata_pio_sectors(qc);
			status = ata_wait_idle(ap);

			if (status & (ATA_BUSY | ATA_DRQ)) {
				"BUSY|DRQ persists on ERR|DF, "
				"dev_stat 0x%X", status);

		ata_pio_sectors(qc);

		status = ata_wait_idle(ap);

		qc->err_mask |= __ac_err_mask(status);

		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		ata_hsm_qc_complete(qc, in_wq);

		ata_hsm_qc_complete(qc, in_wq);
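/*
 * Polled PIO is driven from a delayed work item (ata_sff_pio_task); these
 * helpers queue the task for a link, flush it during error handling, and
 * track which link owns the in-flight polled command.
 */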
	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	ap->sff_pio_task_link = NULL;

	struct ata_link *link = ap->sff_pio_task_link;

		ap->sff_pio_task_link = NULL;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		ata_msleep(ap, 2);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			return;
		}
	}

	ap->sff_pio_task_link = NULL;
	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

	case ATA_PROT_PIO:
		ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
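/*
 * Interrupt handling: __ata_sff_port_intr() services one port's active
 * command, and __ata_sff_interrupt() walks every port of the host, retrying
 * once with a spurious-IRQ check and clear if nothing was handled.
 */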
static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

		return ata_sff_idle_irq(ap);

		return ata_sff_idle_irq(ap);

	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {

		return ata_sff_idle_irq(ap);
	}

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	return __ata_sff_port_intr(ap, qc, false);
static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;

	status = ata_sff_altstatus(ap);
	if (status & ATA_BUSY)
		return;
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
	if (rc && rc != -ENODEV) {
		ata_link_warn(link,
			      "device not ready (errno=%d), forcing hardreset\n",
			      rc);
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);

	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;

	} else if ((class == ATA_DEV_ATA) &&
		   (ap->ops->sff_check_status(ap) == 0))
		class = ATA_DEV_NONE;
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);

		ap->ops->sff_dev_select(ap, 1);

		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			ata_msleep(ap, 50);	/* give drive a breather */
		}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);
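/*
 * Softreset sequence: pulse SRST in the device control register, wait for
 * the devices to become ready again, then classify what answered.
 */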
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	ap->last_ctl = ap->ctl;
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&link->device[1],
						  devmask & (1 << 1), &err);

	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ata_sff_check_ready);

	DPRINTK("EXIT, class=%u\n", *class);
	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 0);

	/* set up device control */
	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
		ata_sff_set_devctl(ap, ap->ctl);
		ap->last_ctl = ap->ctl;
	}
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
						&& count < 65536; count += 2)
		ioread16(ap->ioaddr.data_addr);

	/* Can become DEBUG later */
	if (count)
		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
	unsigned long flags;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);

	spin_lock_irqsave(ap->lock, flags);

	if (ap->ops->sff_drain_fifo)
		ap->ops->sff_drain_fifo(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
		  ap->ops->postreset);
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;

	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
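/*
 * PCI SFF glue: check that a port's BARs are actually wired up, iomap them,
 * and fill in the taskfile addresses for up to two native-mode ports.
 */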
static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	for (i = 0; i < 2; i++) {
int ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		dev_warn(gdev,
			 "failed to request/iomap BARs for port %d (errno=%d)\n",
			 i, rc);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_err(gdev, "no available native port\n");
		return -ENODEV;
	}
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
			     const struct ata_port_info * const *ppi,
			     struct ata_host **r_host)
{
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_sff_init_host(host);
int ata_pci_sff_activate_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;

#if defined(CONFIG_NO_ATA_LEGACY)
	/* Some platforms with PCI limits cannot address compat port space. */
	if (legacy_mode) {
		printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
		return -EOPNOTSUPP;
	}
#endif

	if (!legacy_mode && pdev->irq) {
		int i;

		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		for (i = 0; i < 2; i++) {
			if (ata_port_is_dummy(host->ports[i]))
				continue;
			ata_port_desc(host->ports[i], "irq %d", pdev->irq);
		}
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {

		if (!ata_port_is_dummy(host->ports[1])) {
	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++)
		if (ppi[i]->port_ops != &ata_dummy_port_ops)
			return ppi[i];
static int ata_pci_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const *ppi,
		struct scsi_host_template *sht, void *host_priv,
		int hflags, bool bmdma)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi;
	struct ata_host *host = NULL;
	int rc;

	pi = ata_sff_find_valid_pi(ppi);
	if (!pi) {
		dev_err(&pdev->dev, "no valid port_info specified\n");
		return -EINVAL;
	}

#ifdef CONFIG_ATA_BMDMA
	if (bmdma)
		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	else
#endif
		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);

	host->flags |= hflags;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma) {
		pci_set_master(pdev);
		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
	} else
#endif
int ata_pci_sff_init_one(struct pci_dev *pdev,
			 const struct ata_port_info * const *ppi,
			 struct scsi_host_template *sht, void *host_priv,
			 int hflag)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
}
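/*
 * For reference, a PIO-only LLD's probe routine would typically wrap the
 * call above roughly as in the sketch below.  my_init_one, my_sht and the
 * local port_info are hypothetical driver symbols, not part of this file:
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.port_ops	= &ata_sff_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
 *	}
 */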
#ifdef CONFIG_ATA_BMDMA

const struct ata_port_ops ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.qc_prep		= ata_bmdma_qc_prep,
	.qc_issue		= ata_bmdma_qc_issue,

	.sff_irq_clear		= ata_bmdma_irq_clear,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.port_start		= ata_bmdma_port_start,
};

const struct ata_port_ops ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_bmdma_port_start32,
};
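/*
 * BMDMA scatter/gather setup: convert qc->sg into PRD entries, splitting any
 * segment that would cross a 64 KiB boundary; the "dumb" variant also works
 * around controllers that cannot handle a full 64 KiB in one entry.
 */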
	unsigned int si, pi;

			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
	unsigned int si, pi;

			u32 sg_len, len, blen;

			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
	ata_bmdma_fill_sg(qc);

	ata_bmdma_fill_sg_dumb(qc);
	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		break;

	case ATAPI_PROT_DMA:
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
	bool bmdma_stopped = false;
	unsigned int handled;

	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			return ata_sff_idle_irq(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);
		bmdma_stopped = true;
	}

	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	bool thaw = false;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	if (qc && ata_is_dma(qc->tf.protocol)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		ap->ops->bmdma_stop(qc);

		/* if we're gonna thaw, make sure IRQ is clear */
		if (thaw) {
			ap->ops->sff_check_status(ap);
			if (ap->ops->sff_irq_clear)
				ap->ops->sff_irq_clear(ap);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
	unsigned long flags;

	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
	}
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);

	void __iomem *mmio = ap->ioaddr.bmdma_addr;
int ata_bmdma_port_start(struct ata_port *ap)
{

int ata_bmdma_port_start32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return ata_bmdma_port_start(ap);
}
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
	int i;

	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);

	for (i = 0; i < 2; i++) {
		host->ports[i]->mwdma_mask = 0;
		host->ports[i]->udma_mask = 0;
	}
}
void ata_pci_bmdma_init(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0) {
		ata_bmdma_nodma(host, "BAR4 is zero");
		return;
	}

	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");

	if (rc)
		ata_bmdma_nodma(host,
				"failed to set consistent dma mask");

	/* request and iomap DMA region */
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
		return;
	}

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const *ppi,
			       struct ata_host **r_host)
{
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
	if (rc)
		return rc;

	ata_pci_bmdma_init(*r_host);
	return 0;
}
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
			   const struct ata_port_info * const *ppi,
			   struct scsi_host_template *sht, void *host_priv,
			   int hflags)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
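/*
 * ata_pci_bmdma_init_one() is the BMDMA-capable counterpart of
 * ata_pci_sff_init_one(): same calling convention, but the host is prepared
 * with ata_pci_bmdma_prepare_host() and serviced by ata_bmdma_interrupt().
 */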
	ap->last_ctl = 0xFF;