Linux Kernel 3.7.1
libata-sff.c
1 /*
2  * libata-sff.c - helper library for PCI IDE BMDMA
3  *
4  * Maintained by: Jeff Garzik <[email protected]>
5  * Please ALWAYS copy [email protected]
6  * on emails.
7  *
8  * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9  * Copyright 2003-2006 Jeff Garzik
10  *
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; see the file COPYING. If not, write to
24  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  * libata documentation is available via 'make {ps|pdf}docs',
28  * as Documentation/DocBook/libata.*
29  *
30  * Hardware documentation available from http://www.t13.org/ and
31  * http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/gfp.h>
37 #include <linux/pci.h>
38 #include <linux/module.h>
39 #include <linux/libata.h>
40 #include <linux/highmem.h>
41 
42 #include "libata.h"
43 
44 static struct workqueue_struct *ata_sff_wq;
45 
46 const struct ata_port_operations ata_sff_port_ops = {
47  .inherits = &ata_base_port_ops,
48 
49  .qc_prep = ata_noop_qc_prep,
50  .qc_issue = ata_sff_qc_issue,
51  .qc_fill_rtf = ata_sff_qc_fill_rtf,
52 
53  .freeze = ata_sff_freeze,
54  .thaw = ata_sff_thaw,
55  .prereset = ata_sff_prereset,
56  .softreset = ata_sff_softreset,
57  .hardreset = sata_sff_hardreset,
58  .postreset = ata_sff_postreset,
59  .error_handler = ata_sff_error_handler,
60 
61  .sff_dev_select = ata_sff_dev_select,
62  .sff_check_status = ata_sff_check_status,
63  .sff_tf_load = ata_sff_tf_load,
64  .sff_tf_read = ata_sff_tf_read,
65  .sff_exec_command = ata_sff_exec_command,
66  .sff_data_xfer = ata_sff_data_xfer,
67  .sff_drain_fifo = ata_sff_drain_fifo,
68 
69  .lost_interrupt = ata_sff_lost_interrupt,
70 };
71 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
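/*
 * Illustrative sketch (not part of this file): a typical low-level PATA
 * driver reuses ata_sff_port_ops via .inherits and overrides only what its
 * hardware needs. The "example_*" names below are hypothetical.
 */
static struct ata_port_operations example_pata_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,	/* generic libata helper */
	.set_piomode	= example_set_piomode,	/* hypothetical timing hook */
};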
72 
84 u8 ata_sff_check_status(struct ata_port *ap)
85 {
86  return ioread8(ap->ioaddr.status_addr);
87 }
89 
103 static u8 ata_sff_altstatus(struct ata_port *ap)
104 {
105  if (ap->ops->sff_check_altstatus)
106  return ap->ops->sff_check_altstatus(ap);
107 
108  return ioread8(ap->ioaddr.altstatus_addr);
109 }
110 
123 static u8 ata_sff_irq_status(struct ata_port *ap)
124 {
125  u8 status;
126 
127  if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
128  status = ata_sff_altstatus(ap);
129  /* Not us: We are busy */
130  if (status & ATA_BUSY)
131  return status;
132  }
133  /* Clear INTRQ latch */
134  status = ap->ops->sff_check_status(ap);
135  return status;
136 }
137 
150 static void ata_sff_sync(struct ata_port *ap)
151 {
152  if (ap->ops->sff_check_altstatus)
153  ap->ops->sff_check_altstatus(ap);
154  else if (ap->ioaddr.altstatus_addr)
155  ioread8(ap->ioaddr.altstatus_addr);
156 }
157 
170 void ata_sff_pause(struct ata_port *ap)
171 {
172  ata_sff_sync(ap);
173  ndelay(400);
174 }
176 
185 void ata_sff_dma_pause(struct ata_port *ap)
186 {
187  if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
188  /* An altstatus read will cause the needed delay without
189  messing up the IRQ status */
190  ata_sff_altstatus(ap);
191  return;
192  }
193  /* There are no DMA controllers without ctl. BUG here to ensure
194  we never violate the HDMA1:0 transition timing and risk
195  corruption. */
196  BUG();
197 }
199 
215 int ata_sff_busy_sleep(struct ata_port *ap,
216  unsigned long tmout_pat, unsigned long tmout)
217 {
218  unsigned long timer_start, timeout;
219  u8 status;
220 
221  status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
222  timer_start = jiffies;
223  timeout = ata_deadline(timer_start, tmout_pat);
224  while (status != 0xff && (status & ATA_BUSY) &&
225  time_before(jiffies, timeout)) {
226  ata_msleep(ap, 50);
227  status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
228  }
229 
230  if (status != 0xff && (status & ATA_BUSY))
231  ata_port_warn(ap,
232  "port is slow to respond, please be patient (Status 0x%x)\n",
233  status);
234 
235  timeout = ata_deadline(timer_start, tmout);
236  while (status != 0xff && (status & ATA_BUSY) &&
237  time_before(jiffies, timeout)) {
238  ata_msleep(ap, 50);
239  status = ap->ops->sff_check_status(ap);
240  }
241 
242  if (status == 0xff)
243  return -ENODEV;
244 
245  if (status & ATA_BUSY) {
246  ata_port_err(ap,
247  "port failed to respond (%lu secs, Status 0x%x)\n",
248  DIV_ROUND_UP(tmout, 1000), status);
249  return -EBUSY;
250  }
251 
252  return 0;
253 }
255 
256 static int ata_sff_check_ready(struct ata_link *link)
257 {
258  u8 status = link->ap->ops->sff_check_status(link->ap);
259 
260  return ata_check_ready(status);
261 }
262 
277 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
278 {
279  return ata_wait_ready(link, deadline, ata_sff_check_ready);
280 }
282 
296 static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
297 {
298  if (ap->ops->sff_set_devctl)
299  ap->ops->sff_set_devctl(ap, ctl);
300  else
301  iowrite8(ctl, ap->ioaddr.ctl_addr);
302 }
303 
318 void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
319 {
320  u8 tmp;
321 
322  if (device == 0)
323  tmp = ATA_DEVICE_OBS;
324  else
325  tmp = ATA_DEVICE_OBS | ATA_DEV1;
326 
327  iowrite8(tmp, ap->ioaddr.device_addr);
328  ata_sff_pause(ap); /* needed; also flushes, for mmio */
329 }
331 
350 static void ata_dev_select(struct ata_port *ap, unsigned int device,
351  unsigned int wait, unsigned int can_sleep)
352 {
353  if (ata_msg_probe(ap))
354  ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
355  device, wait);
356 
357  if (wait)
358  ata_wait_idle(ap);
359 
360  ap->ops->sff_dev_select(ap, device);
361 
362  if (wait) {
363  if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
364  ata_msleep(ap, 150);
365  ata_wait_idle(ap);
366  }
367 }
368 
382 void ata_sff_irq_on(struct ata_port *ap)
383 {
384  struct ata_ioports *ioaddr = &ap->ioaddr;
385 
386  if (ap->ops->sff_irq_on) {
387  ap->ops->sff_irq_on(ap);
388  return;
389  }
390 
391  ap->ctl &= ~ATA_NIEN;
392  ap->last_ctl = ap->ctl;
393 
394  if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
395  ata_sff_set_devctl(ap, ap->ctl);
396  ata_wait_idle(ap);
397 
398  if (ap->ops->sff_irq_clear)
399  ap->ops->sff_irq_clear(ap);
400 }
402 
413 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
414 {
415  struct ata_ioports *ioaddr = &ap->ioaddr;
416  unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
417 
418  if (tf->ctl != ap->last_ctl) {
419  if (ioaddr->ctl_addr)
420  iowrite8(tf->ctl, ioaddr->ctl_addr);
421  ap->last_ctl = tf->ctl;
422  ata_wait_idle(ap);
423  }
424 
425  if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
426  WARN_ON_ONCE(!ioaddr->ctl_addr);
427  iowrite8(tf->hob_feature, ioaddr->feature_addr);
428  iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
429  iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
430  iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
431  iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
432  VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
433  tf->hob_feature,
434  tf->hob_nsect,
435  tf->hob_lbal,
436  tf->hob_lbam,
437  tf->hob_lbah);
438  }
439 
440  if (is_addr) {
441  iowrite8(tf->feature, ioaddr->feature_addr);
442  iowrite8(tf->nsect, ioaddr->nsect_addr);
443  iowrite8(tf->lbal, ioaddr->lbal_addr);
444  iowrite8(tf->lbam, ioaddr->lbam_addr);
445  iowrite8(tf->lbah, ioaddr->lbah_addr);
446  VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
447  tf->feature,
448  tf->nsect,
449  tf->lbal,
450  tf->lbam,
451  tf->lbah);
452  }
453 
454  if (tf->flags & ATA_TFLAG_DEVICE) {
455  iowrite8(tf->device, ioaddr->device_addr);
456  VPRINTK("device 0x%X\n", tf->device);
457  }
458 
459  ata_wait_idle(ap);
460 }
462 
476 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
477 {
478  struct ata_ioports *ioaddr = &ap->ioaddr;
479 
480  tf->command = ata_sff_check_status(ap);
481  tf->feature = ioread8(ioaddr->error_addr);
482  tf->nsect = ioread8(ioaddr->nsect_addr);
483  tf->lbal = ioread8(ioaddr->lbal_addr);
484  tf->lbam = ioread8(ioaddr->lbam_addr);
485  tf->lbah = ioread8(ioaddr->lbah_addr);
486  tf->device = ioread8(ioaddr->device_addr);
487 
488  if (tf->flags & ATA_TFLAG_LBA48) {
489  if (likely(ioaddr->ctl_addr)) {
490  iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
491  tf->hob_feature = ioread8(ioaddr->error_addr);
492  tf->hob_nsect = ioread8(ioaddr->nsect_addr);
493  tf->hob_lbal = ioread8(ioaddr->lbal_addr);
494  tf->hob_lbam = ioread8(ioaddr->lbam_addr);
495  tf->hob_lbah = ioread8(ioaddr->lbah_addr);
496  iowrite8(tf->ctl, ioaddr->ctl_addr);
497  ap->last_ctl = tf->ctl;
498  } else
499  WARN_ON_ONCE(1);
500  }
501 }
503 
515 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
516 {
517  DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
518 
519  iowrite8(tf->command, ap->ioaddr.command_addr);
520  ata_sff_pause(ap);
521 }
523 
536 static inline void ata_tf_to_host(struct ata_port *ap,
537  const struct ata_taskfile *tf)
538 {
539  ap->ops->sff_tf_load(ap, tf);
540  ap->ops->sff_exec_command(ap, tf);
541 }
542 
558 unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
559  unsigned int buflen, int rw)
560 {
561  struct ata_port *ap = dev->link->ap;
562  void __iomem *data_addr = ap->ioaddr.data_addr;
563  unsigned int words = buflen >> 1;
564 
565  /* Transfer multiple of 2 bytes */
566  if (rw == READ)
567  ioread16_rep(data_addr, buf, words);
568  else
569  iowrite16_rep(data_addr, buf, words);
570 
571  /* Transfer trailing byte, if any. */
572  if (unlikely(buflen & 0x01)) {
573  unsigned char pad[2] = { };
574 
575  /* Point buf to the tail of buffer */
576  buf += buflen - 1;
577 
578  /*
579  * Use io*16_rep() accessors here as well to avoid pointlessly
580  * swapping bytes to and from on the big endian machines...
581  */
582  if (rw == READ) {
583  ioread16_rep(data_addr, pad, 1);
584  *buf = pad[0];
585  } else {
586  pad[0] = *buf;
587  iowrite16_rep(data_addr, pad, 1);
588  }
589  words++;
590  }
591 
592  return words << 1;
593 }
595 
613 unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
614  unsigned int buflen, int rw)
615 {
616  struct ata_port *ap = dev->link->ap;
617  void __iomem *data_addr = ap->ioaddr.data_addr;
618  unsigned int words = buflen >> 2;
619  int slop = buflen & 3;
620 
621  if (!(ap->pflags & ATA_PFLAG_PIO32))
622  return ata_sff_data_xfer(dev, buf, buflen, rw);
623 
624  /* Transfer multiple of 4 bytes */
625  if (rw == READ)
626  ioread32_rep(data_addr, buf, words);
627  else
628  iowrite32_rep(data_addr, buf, words);
629 
630  /* Transfer trailing bytes, if any */
631  if (unlikely(slop)) {
632  unsigned char pad[4] = { };
633 
634  /* Point buf to the tail of buffer */
635  buf += buflen - slop;
636 
637  /*
638  * Use io*_rep() accessors here as well to avoid pointlessly
639  * swapping bytes to and from on the big endian machines...
640  */
641  if (rw == READ) {
642  if (slop < 3)
643  ioread16_rep(data_addr, pad, 1);
644  else
645  ioread32_rep(data_addr, pad, 1);
646  memcpy(buf, pad, slop);
647  } else {
648  memcpy(pad, buf, slop);
649  if (slop < 3)
650  iowrite16_rep(data_addr, pad, 1);
651  else
652  iowrite32_rep(data_addr, pad, 1);
653  }
654  }
655  return (buflen + 1) & ~1;
656 }
658 
675 unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
676  unsigned int buflen, int rw)
677 {
678  unsigned long flags;
679  unsigned int consumed;
680 
681  local_irq_save(flags);
682  consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
683  local_irq_restore(flags);
684 
685  return consumed;
686 }
688 
698 static void ata_pio_sector(struct ata_queued_cmd *qc)
699 {
700  int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
701  struct ata_port *ap = qc->ap;
702  struct page *page;
703  unsigned int offset;
704  unsigned char *buf;
705 
706  if (qc->curbytes == qc->nbytes - qc->sect_size)
707  ap->hsm_task_state = HSM_ST_LAST;
708 
709  page = sg_page(qc->cursg);
710  offset = qc->cursg->offset + qc->cursg_ofs;
711 
712  /* get the current page and offset */
713  page = nth_page(page, (offset >> PAGE_SHIFT));
714  offset %= PAGE_SIZE;
715 
716  DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
717 
718  if (PageHighMem(page)) {
719  unsigned long flags;
720 
721  /* FIXME: use a bounce buffer */
722  local_irq_save(flags);
723  buf = kmap_atomic(page);
724 
725  /* do the actual data transfer */
726  ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
727  do_write);
728 
729  kunmap_atomic(buf);
730  local_irq_restore(flags);
731  } else {
732  buf = page_address(page);
733  ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
734  do_write);
735  }
736 
737  if (!do_write && !PageSlab(page))
738  flush_dcache_page(page);
739 
740  qc->curbytes += qc->sect_size;
741  qc->cursg_ofs += qc->sect_size;
742 
743  if (qc->cursg_ofs == qc->cursg->length) {
744  qc->cursg = sg_next(qc->cursg);
745  qc->cursg_ofs = 0;
746  }
747 }
748 
759 static void ata_pio_sectors(struct ata_queued_cmd *qc)
760 {
761  if (is_multi_taskfile(&qc->tf)) {
762  /* READ/WRITE MULTIPLE */
763  unsigned int nsect;
764 
765  WARN_ON_ONCE(qc->dev->multi_count == 0);
766 
767  nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
768  qc->dev->multi_count);
769  while (nsect--)
770  ata_pio_sector(qc);
771  } else
772  ata_pio_sector(qc);
773 
774  ata_sff_sync(qc->ap); /* flush */
775 }
776 
788 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
789 {
790  /* send SCSI cdb */
791  DPRINTK("send cdb\n");
792  WARN_ON_ONCE(qc->dev->cdb_len < 12);
793 
794  ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
795  ata_sff_sync(ap);
796  /* FIXME: If the CDB is for DMA do we need to do the transition delay
797  or is bmdma_start guaranteed to do it ? */
798  switch (qc->tf.protocol) {
799  case ATAPI_PROT_PIO:
800  ap->hsm_task_state = HSM_ST;
801  break;
802  case ATAPI_PROT_NODATA:
803  ap->hsm_task_state = HSM_ST_LAST;
804  break;
805 #ifdef CONFIG_ATA_BMDMA
806  case ATAPI_PROT_DMA:
807  ap->hsm_task_state = HSM_ST_LAST;
808  /* initiate bmdma */
809  ap->ops->bmdma_start(qc);
810  break;
811 #endif /* CONFIG_ATA_BMDMA */
812  default:
813  BUG();
814  }
815 }
816 
828 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
829 {
830  int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
831  struct ata_port *ap = qc->ap;
832  struct ata_device *dev = qc->dev;
833  struct ata_eh_info *ehi = &dev->link->eh_info;
834  struct scatterlist *sg;
835  struct page *page;
836  unsigned char *buf;
837  unsigned int offset, count, consumed;
838 
839 next_sg:
840  sg = qc->cursg;
841  if (unlikely(!sg)) {
842  ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
843  "buf=%u cur=%u bytes=%u",
844  qc->nbytes, qc->curbytes, bytes);
845  return -1;
846  }
847 
848  page = sg_page(sg);
849  offset = sg->offset + qc->cursg_ofs;
850 
851  /* get the current page and offset */
852  page = nth_page(page, (offset >> PAGE_SHIFT));
853  offset %= PAGE_SIZE;
854 
855  /* don't overrun current sg */
856  count = min(sg->length - qc->cursg_ofs, bytes);
857 
858  /* don't cross page boundaries */
859  count = min(count, (unsigned int)PAGE_SIZE - offset);
860 
861  DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
862 
863  if (PageHighMem(page)) {
864  unsigned long flags;
865 
866  /* FIXME: use bounce buffer */
867  local_irq_save(flags);
868  buf = kmap_atomic(page);
869 
870  /* do the actual data transfer */
871  consumed = ap->ops->sff_data_xfer(dev, buf + offset,
872  count, rw);
873 
874  kunmap_atomic(buf);
875  local_irq_restore(flags);
876  } else {
877  buf = page_address(page);
878  consumed = ap->ops->sff_data_xfer(dev, buf + offset,
879  count, rw);
880  }
881 
882  bytes -= min(bytes, consumed);
883  qc->curbytes += count;
884  qc->cursg_ofs += count;
885 
886  if (qc->cursg_ofs == sg->length) {
887  qc->cursg = sg_next(qc->cursg);
888  qc->cursg_ofs = 0;
889  }
890 
891  /*
892  * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
893  * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
894  * check correctly as it doesn't know if it is the last request being
895  * made. Somebody should implement a proper sanity check.
896  */
897  if (bytes)
898  goto next_sg;
899  return 0;
900 }
901 
911 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
912 {
913  struct ata_port *ap = qc->ap;
914  struct ata_device *dev = qc->dev;
915  struct ata_eh_info *ehi = &dev->link->eh_info;
916  unsigned int ireason, bc_lo, bc_hi, bytes;
917  int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
918 
919  /* Abuse qc->result_tf for temp storage of intermediate TF
920  * here to save some kernel stack usage.
921  * For normal completion, qc->result_tf is not relevant. For
922  * error, qc->result_tf is later overwritten by ata_qc_complete().
923  * So, the correctness of qc->result_tf is not affected.
924  */
925  ap->ops->sff_tf_read(ap, &qc->result_tf);
926  ireason = qc->result_tf.nsect;
927  bc_lo = qc->result_tf.lbam;
928  bc_hi = qc->result_tf.lbah;
929  bytes = (bc_hi << 8) | bc_lo;
930 
931  /* shall be cleared to zero, indicating xfer of data */
932  if (unlikely(ireason & ATAPI_COD))
933  goto atapi_check;
934 
935  /* make sure transfer direction matches expected */
936  i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
937  if (unlikely(do_write != i_write))
938  goto atapi_check;
939 
940  if (unlikely(!bytes))
941  goto atapi_check;
942 
943  VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
944 
945  if (unlikely(__atapi_pio_bytes(qc, bytes)))
946  goto err_out;
947  ata_sff_sync(ap); /* flush */
948 
949  return;
950 
951  atapi_check:
952  ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
953  ireason, bytes);
954  err_out:
955  qc->err_mask |= AC_ERR_HSM;
956  ap->hsm_task_state = HSM_ST_ERR;
957 }
958 
967 static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
968  struct ata_queued_cmd *qc)
969 {
970  if (qc->tf.flags & ATA_TFLAG_POLLING)
971  return 1;
972 
973  if (ap->hsm_task_state == HSM_ST_FIRST) {
974  if (qc->tf.protocol == ATA_PROT_PIO &&
975  (qc->tf.flags & ATA_TFLAG_WRITE))
976  return 1;
977 
978  if (ata_is_atapi(qc->tf.protocol) &&
979  !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
980  return 1;
981  }
982 
983  return 0;
984 }
985 
997 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
998 {
999  struct ata_port *ap = qc->ap;
1000  unsigned long flags;
1001 
1002  if (ap->ops->error_handler) {
1003  if (in_wq) {
1004  spin_lock_irqsave(ap->lock, flags);
1005 
1006  /* EH might have kicked in while host lock is
1007  * released.
1008  */
1009  qc = ata_qc_from_tag(ap, qc->tag);
1010  if (qc) {
1011  if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1012  ata_sff_irq_on(ap);
1013  ata_qc_complete(qc);
1014  } else
1015  ata_port_freeze(ap);
1016  }
1017 
1018  spin_unlock_irqrestore(ap->lock, flags);
1019  } else {
1020  if (likely(!(qc->err_mask & AC_ERR_HSM)))
1021  ata_qc_complete(qc);
1022  else
1023  ata_port_freeze(ap);
1024  }
1025  } else {
1026  if (in_wq) {
1027  spin_lock_irqsave(ap->lock, flags);
1028  ata_sff_irq_on(ap);
1029  ata_qc_complete(qc);
1030  spin_unlock_irqrestore(ap->lock, flags);
1031  } else
1032  ata_qc_complete(qc);
1033  }
1034 }
1035 
1046 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1047  u8 status, int in_wq)
1048 {
1049  struct ata_link *link = qc->dev->link;
1050  struct ata_eh_info *ehi = &link->eh_info;
1051  unsigned long flags = 0;
1052  int poll_next;
1053 
1054  WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1055 
1056  /* Make sure ata_sff_qc_issue() does not throw things
1057  * like DMA polling into the workqueue. Notice that
1058  * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1059  */
1060  WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1061 
1062 fsm_start:
1063  DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1064  ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1065 
1066  switch (ap->hsm_task_state) {
1067  case HSM_ST_FIRST:
1068  /* Send first data block or PACKET CDB */
1069 
1070  /* If polling, we will stay in the work queue after
1071  * sending the data. Otherwise, interrupt handler
1072  * takes over after sending the data.
1073  */
1074  poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1075 
1076  /* check device status */
1077  if (unlikely((status & ATA_DRQ) == 0)) {
1078  /* handle BSY=0, DRQ=0 as error */
1079  if (likely(status & (ATA_ERR | ATA_DF)))
1080  /* device stops HSM for abort/error */
1081  qc->err_mask |= AC_ERR_DEV;
1082  else {
1083  /* HSM violation. Let EH handle this */
1084  ata_ehi_push_desc(ehi,
1085  "ST_FIRST: !(DRQ|ERR|DF)");
1086  qc->err_mask |= AC_ERR_HSM;
1087  }
1088 
1089  ap->hsm_task_state = HSM_ST_ERR;
1090  goto fsm_start;
1091  }
1092 
1093  /* Device should not ask for data transfer (DRQ=1)
1094  * when it finds something wrong.
1095  * We ignore DRQ here and stop the HSM by
1096  * changing hsm_task_state to HSM_ST_ERR and
1097  * let the EH abort the command or reset the device.
1098  */
1099  if (unlikely(status & (ATA_ERR | ATA_DF))) {
1100  /* Some ATAPI tape drives forget to clear the ERR bit
1101  * when doing the next command (mostly request sense).
1102  * We ignore ERR here to workaround and proceed sending
1103  * the CDB.
1104  */
1105  if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1106  ata_ehi_push_desc(ehi, "ST_FIRST: "
1107  "DRQ=1 with device error, "
1108  "dev_stat 0x%X", status);
1109  qc->err_mask |= AC_ERR_HSM;
1110  ap->hsm_task_state = HSM_ST_ERR;
1111  goto fsm_start;
1112  }
1113  }
1114 
1115  /* Send the CDB (atapi) or the first data block (ata pio out).
1116  * During the state transition, interrupt handler shouldn't
1117  * be invoked before the data transfer is complete and
1118  * hsm_task_state is changed. Hence, the following locking.
1119  */
1120  if (in_wq)
1121  spin_lock_irqsave(ap->lock, flags);
1122 
1123  if (qc->tf.protocol == ATA_PROT_PIO) {
1124  /* PIO data out protocol.
1125  * send first data block.
1126  */
1127 
1128  /* ata_pio_sectors() might change the state
1129  * to HSM_ST_LAST. so, the state is changed here
1130  * before ata_pio_sectors().
1131  */
1132  ap->hsm_task_state = HSM_ST;
1133  ata_pio_sectors(qc);
1134  } else
1135  /* send CDB */
1136  atapi_send_cdb(ap, qc);
1137 
1138  if (in_wq)
1139  spin_unlock_irqrestore(ap->lock, flags);
1140 
1141  /* if polling, ata_sff_pio_task() handles the rest.
1142  * otherwise, interrupt handler takes over from here.
1143  */
1144  break;
1145 
1146  case HSM_ST:
1147  /* complete command or read/write the data register */
1148  if (qc->tf.protocol == ATAPI_PROT_PIO) {
1149  /* ATAPI PIO protocol */
1150  if ((status & ATA_DRQ) == 0) {
1151  /* No more data to transfer or device error.
1152  * Device error will be tagged in HSM_ST_LAST.
1153  */
1154  ap->hsm_task_state = HSM_ST_LAST;
1155  goto fsm_start;
1156  }
1157 
1158  /* Device should not ask for data transfer (DRQ=1)
1159  * when it finds something wrong.
1160  * We ignore DRQ here and stop the HSM by
1161  * changing hsm_task_state to HSM_ST_ERR and
1162  * let the EH abort the command or reset the device.
1163  */
1164  if (unlikely(status & (ATA_ERR | ATA_DF))) {
1165  ata_ehi_push_desc(ehi, "ST-ATAPI: "
1166  "DRQ=1 with device error, "
1167  "dev_stat 0x%X", status);
1168  qc->err_mask |= AC_ERR_HSM;
1169  ap->hsm_task_state = HSM_ST_ERR;
1170  goto fsm_start;
1171  }
1172 
1173  atapi_pio_bytes(qc);
1174 
1175  if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1176  /* bad ireason reported by device */
1177  goto fsm_start;
1178 
1179  } else {
1180  /* ATA PIO protocol */
1181  if (unlikely((status & ATA_DRQ) == 0)) {
1182  /* handle BSY=0, DRQ=0 as error */
1183  if (likely(status & (ATA_ERR | ATA_DF))) {
1184  /* device stops HSM for abort/error */
1185  qc->err_mask |= AC_ERR_DEV;
1186 
1187  /* If diagnostic failed and this is
1188  * IDENTIFY, it's likely a phantom
1189  * device. Mark hint.
1190  */
1191  if (qc->dev->horkage &
1192      ATA_HORKAGE_DIAGNOSTIC)
1193  qc->err_mask |=
1194      AC_ERR_NODEV_HINT;
1195  } else {
1196  /* HSM violation. Let EH handle this.
1197  * Phantom devices also trigger this
1198  * condition. Mark hint.
1199  */
1200  ata_ehi_push_desc(ehi, "ST-ATA: "
1201  "DRQ=0 without device error, "
1202  "dev_stat 0x%X", status);
1203  qc->err_mask |= AC_ERR_HSM |
1204      AC_ERR_NODEV_HINT;
1205  }
1206 
1207  ap->hsm_task_state = HSM_ST_ERR;
1208  goto fsm_start;
1209  }
1210 
1211  /* For PIO reads, some devices may ask for
1212  * data transfer (DRQ=1) alone with ERR=1.
1213  * We respect DRQ here and transfer one
1214  * block of junk data before changing the
1215  * hsm_task_state to HSM_ST_ERR.
1216  *
1217  * For PIO writes, ERR=1 DRQ=1 doesn't make
1218  * sense since the data block has been
1219  * transferred to the device.
1220  */
1221  if (unlikely(status & (ATA_ERR | ATA_DF))) {
1222  /* data might be corrupted */
1223  qc->err_mask |= AC_ERR_DEV;
1224 
1225  if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1226  ata_pio_sectors(qc);
1227  status = ata_wait_idle(ap);
1228  }
1229 
1230  if (status & (ATA_BUSY | ATA_DRQ)) {
1231  ata_ehi_push_desc(ehi, "ST-ATA: "
1232  "BUSY|DRQ persists on ERR|DF, "
1233  "dev_stat 0x%X", status);
1234  qc->err_mask |= AC_ERR_HSM;
1235  }
1236 
1237  /* There are oddball controllers with
1238  * status register stuck at 0x7f and
1239  * lbal/m/h at zero which makes it
1240  * pass all other presence detection
1241  * mechanisms we have. Set NODEV_HINT
1242  * for it. Kernel bz#7241.
1243  */
1244  if (status == 0x7f)
1245  qc->err_mask |= AC_ERR_NODEV_HINT;
1246 
1247  /* ata_pio_sectors() might change the
1248  * state to HSM_ST_LAST. so, the state
1249  * is changed after ata_pio_sectors().
1250  */
1251  ap->hsm_task_state = HSM_ST_ERR;
1252  goto fsm_start;
1253  }
1254 
1255  ata_pio_sectors(qc);
1256 
1257  if (ap->hsm_task_state == HSM_ST_LAST &&
1258  (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1259  /* all data read */
1260  status = ata_wait_idle(ap);
1261  goto fsm_start;
1262  }
1263  }
1264 
1265  poll_next = 1;
1266  break;
1267 
1268  case HSM_ST_LAST:
1269  if (unlikely(!ata_ok(status))) {
1270  qc->err_mask |= __ac_err_mask(status);
1271  ap->hsm_task_state = HSM_ST_ERR;
1272  goto fsm_start;
1273  }
1274 
1275  /* no more data to transfer */
1276  DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1277  ap->print_id, qc->dev->devno, status);
1278 
1279  WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1280 
1281  ap->hsm_task_state = HSM_ST_IDLE;
1282 
1283  /* complete taskfile transaction */
1284  ata_hsm_qc_complete(qc, in_wq);
1285 
1286  poll_next = 0;
1287  break;
1288 
1289  case HSM_ST_ERR:
1290  ap->hsm_task_state = HSM_ST_IDLE;
1291 
1292  /* complete taskfile transaction */
1293  ata_hsm_qc_complete(qc, in_wq);
1294 
1295  poll_next = 0;
1296  break;
1297  default:
1298  poll_next = 0;
1299  BUG();
1300  }
1301 
1302  return poll_next;
1303 }
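/*
 * Summary of the state machine above: HSM_ST_FIRST sends the first data
 * block or the ATAPI CDB, HSM_ST services the data register until DRQ
 * drops, HSM_ST_LAST checks final status and completes the command, and
 * HSM_ST_ERR completes it with an error; both terminal states return to
 * HSM_ST_IDLE via ata_hsm_qc_complete(). The return value (poll_next)
 * tells a polling caller whether another pass is needed.
 */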
1305 
1306 void ata_sff_queue_work(struct work_struct *work)
1307 {
1308  queue_work(ata_sff_wq, work);
1309 }
1311 
1312 void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1313 {
1314  queue_delayed_work(ata_sff_wq, dwork, delay);
1315 }
1317 
1318 void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1319 {
1320  struct ata_port *ap = link->ap;
1321 
1322  WARN_ON((ap->sff_pio_task_link != NULL) &&
1323  (ap->sff_pio_task_link != link));
1324  ap->sff_pio_task_link = link;
1325 
1326  /* may fail if ata_sff_flush_pio_task() in progress */
1327  ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1328 }
1330 
1331 void ata_sff_flush_pio_task(struct ata_port *ap)
1332 {
1333  DPRINTK("ENTER\n");
1334 
1335  cancel_delayed_work_sync(&ap->sff_pio_task);
1336  ap->hsm_task_state = HSM_ST_IDLE;
1337  ap->sff_pio_task_link = NULL;
1338 
1339  if (ata_msg_ctl(ap))
1340  ata_port_dbg(ap, "%s: EXIT\n", __func__);
1341 }
1342 
1343 static void ata_sff_pio_task(struct work_struct *work)
1344 {
1345  struct ata_port *ap =
1346  container_of(work, struct ata_port, sff_pio_task.work);
1347  struct ata_link *link = ap->sff_pio_task_link;
1348  struct ata_queued_cmd *qc;
1349  u8 status;
1350  int poll_next;
1351 
1352  BUG_ON(ap->sff_pio_task_link == NULL);
1353  /* qc can be NULL if timeout occurred */
1354  qc = ata_qc_from_tag(ap, link->active_tag);
1355  if (!qc) {
1356  ap->sff_pio_task_link = NULL;
1357  return;
1358  }
1359 
1360 fsm_start:
1361  WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1362 
1363  /*
1364  * This is purely heuristic. This is a fast path.
1365  * Sometimes when we enter, BSY will be cleared in
1366  * a chk-status or two. If not, the drive is probably seeking
1367  * or something. Snooze for a couple msecs, then
1368  * chk-status again. If still busy, queue delayed work.
1369  */
1370  status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1371  if (status & ATA_BUSY) {
1372  ata_msleep(ap, 2);
1373  status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1374  if (status & ATA_BUSY) {
1375  ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1376  return;
1377  }
1378  }
1379 
1380  /*
1381  * hsm_move() may trigger another command to be processed.
1382  * clean the link beforehand.
1383  */
1384  ap->sff_pio_task_link = NULL;
1385  /* move the HSM */
1386  poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1387 
1388  /* another command or interrupt handler
1389  * may be running at this point.
1390  */
1391  if (poll_next)
1392  goto fsm_start;
1393 }
1394 
1408 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1409 {
1410  struct ata_port *ap = qc->ap;
1411  struct ata_link *link = qc->dev->link;
1412 
1413  /* Use polling pio if the LLD doesn't handle
1414  * interrupt driven pio and atapi CDB interrupt.
1415  */
1416  if (ap->flags & ATA_FLAG_PIO_POLLING)
1417  qc->tf.flags |= ATA_TFLAG_POLLING;
1418 
1419  /* select the device */
1420  ata_dev_select(ap, qc->dev->devno, 1, 0);
1421 
1422  /* start the command */
1423  switch (qc->tf.protocol) {
1424  case ATA_PROT_NODATA:
1425  if (qc->tf.flags & ATA_TFLAG_POLLING)
1426  ata_qc_set_polling(qc);
1427 
1428  ata_tf_to_host(ap, &qc->tf);
1429  ap->hsm_task_state = HSM_ST_LAST;
1430 
1431  if (qc->tf.flags & ATA_TFLAG_POLLING)
1432  ata_sff_queue_pio_task(link, 0);
1433 
1434  break;
1435 
1436  case ATA_PROT_PIO:
1437  if (qc->tf.flags & ATA_TFLAG_POLLING)
1438  ata_qc_set_polling(qc);
1439 
1440  ata_tf_to_host(ap, &qc->tf);
1441 
1442  if (qc->tf.flags & ATA_TFLAG_WRITE) {
1443  /* PIO data out protocol */
1444  ap->hsm_task_state = HSM_ST_FIRST;
1445  ata_sff_queue_pio_task(link, 0);
1446 
1447  /* always send first data block using the
1448  * ata_sff_pio_task() codepath.
1449  */
1450  } else {
1451  /* PIO data in protocol */
1452  ap->hsm_task_state = HSM_ST;
1453 
1454  if (qc->tf.flags & ATA_TFLAG_POLLING)
1455  ata_sff_queue_pio_task(link, 0);
1456 
1457  /* if polling, ata_sff_pio_task() handles the
1458  * rest. otherwise, interrupt handler takes
1459  * over from here.
1460  */
1461  }
1462 
1463  break;
1464 
1465  case ATAPI_PROT_PIO:
1466  case ATAPI_PROT_NODATA:
1467  if (qc->tf.flags & ATA_TFLAG_POLLING)
1468  ata_qc_set_polling(qc);
1469 
1470  ata_tf_to_host(ap, &qc->tf);
1471 
1472  ap->hsm_task_state = HSM_ST_FIRST;
1473 
1474  /* send cdb by polling if no cdb interrupt */
1475  if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1476  (qc->tf.flags & ATA_TFLAG_POLLING))
1477  ata_sff_queue_pio_task(link, 0);
1478  break;
1479 
1480  default:
1481  WARN_ON_ONCE(1);
1482  return AC_ERR_SYSTEM;
1483  }
1484 
1485  return 0;
1486 }
1488 
1502 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1503 {
1504  qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1505  return true;
1506 }
1508 
1509 static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1510 {
1511  ap->stats.idle_irq++;
1512 
1513 #ifdef ATA_IRQ_TRAP
1514  if ((ap->stats.idle_irq % 1000) == 0) {
1515  ap->ops->sff_check_status(ap);
1516  if (ap->ops->sff_irq_clear)
1517  ap->ops->sff_irq_clear(ap);
1518  ata_port_warn(ap, "irq trap\n");
1519  return 1;
1520  }
1521 #endif
1522  return 0; /* irq not handled */
1523 }
1524 
1525 static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1526  struct ata_queued_cmd *qc,
1527  bool hsmv_on_idle)
1528 {
1529  u8 status;
1530 
1531  VPRINTK("ata%u: protocol %d task_state %d\n",
1532  ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1533 
1534  /* Check whether we are expecting interrupt in this state */
1535  switch (ap->hsm_task_state) {
1536  case HSM_ST_FIRST:
1537  /* Some pre-ATAPI-4 devices assert INTRQ
1538  * at this state when ready to receive CDB.
1539  */
1540 
1541  /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
1542  * The flag was turned on only for atapi devices. No
1543  * need to check ata_is_atapi(qc->tf.protocol) again.
1544  */
1545  if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1546  return ata_sff_idle_irq(ap);
1547  break;
1548  case HSM_ST_IDLE:
1549  return ata_sff_idle_irq(ap);
1550  default:
1551  break;
1552  }
1553 
1554  /* check main status, clearing INTRQ if needed */
1555  status = ata_sff_irq_status(ap);
1556  if (status & ATA_BUSY) {
1557  if (hsmv_on_idle) {
1558  /* BMDMA engine is already stopped, we're screwed */
1559  qc->err_mask |= AC_ERR_HSM;
1560  ap->hsm_task_state = HSM_ST_ERR;
1561  } else
1562  return ata_sff_idle_irq(ap);
1563  }
1564 
1565  /* clear irq events */
1566  if (ap->ops->sff_irq_clear)
1567  ap->ops->sff_irq_clear(ap);
1568 
1569  ata_sff_hsm_move(ap, qc, status, 0);
1570 
1571  return 1; /* irq handled */
1572 }
1573 
1587 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1588 {
1589  return __ata_sff_port_intr(ap, qc, false);
1590 }
1592 
1593 static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1594  unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1595 {
1596  struct ata_host *host = dev_instance;
1597  bool retried = false;
1598  unsigned int i;
1599  unsigned int handled, idle, polling;
1600  unsigned long flags;
1601 
1602  /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1603  spin_lock_irqsave(&host->lock, flags);
1604 
1605 retry:
1606  handled = idle = polling = 0;
1607  for (i = 0; i < host->n_ports; i++) {
1608  struct ata_port *ap = host->ports[i];
1609  struct ata_queued_cmd *qc;
1610 
1611  qc = ata_qc_from_tag(ap, ap->link.active_tag);
1612  if (qc) {
1613  if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1614  handled |= port_intr(ap, qc);
1615  else
1616  polling |= 1 << i;
1617  } else
1618  idle |= 1 << i;
1619  }
1620 
1621  /*
1622  * If no port was expecting IRQ but the controller is actually
1623  * asserting IRQ line, nobody cared will ensue. Check IRQ
1624  * pending status if available and clear spurious IRQ.
1625  */
1626  if (!handled && !retried) {
1627  bool retry = false;
1628 
1629  for (i = 0; i < host->n_ports; i++) {
1630  struct ata_port *ap = host->ports[i];
1631 
1632  if (polling & (1 << i))
1633  continue;
1634 
1635  if (!ap->ops->sff_irq_check ||
1636  !ap->ops->sff_irq_check(ap))
1637  continue;
1638 
1639  if (idle & (1 << i)) {
1640  ap->ops->sff_check_status(ap);
1641  if (ap->ops->sff_irq_clear)
1642  ap->ops->sff_irq_clear(ap);
1643  } else {
1644  /* clear INTRQ and check if BUSY cleared */
1645  if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1646  retry |= true;
1647  /*
1648  * With command in flight, we can't do
1649  * sff_irq_clear() w/o racing with completion.
1650  */
1651  }
1652  }
1653 
1654  if (retry) {
1655  retried = true;
1656  goto retry;
1657  }
1658  }
1659 
1660  spin_unlock_irqrestore(&host->lock, flags);
1661 
1662  return IRQ_RETVAL(handled);
1663 }
1664 
1679 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1680 {
1681  return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1682 }
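/*
 * Illustrative sketch (not part of this file): platform/non-PCI drivers
 * usually hand ata_sff_interrupt straight to ata_host_activate(); "irq"
 * and "example_sht" here are hypothetical.
 */
rc = ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
		       &example_sht);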
1684 
1698 void ata_sff_lost_interrupt(struct ata_port *ap)
1699 {
1700  u8 status;
1701  struct ata_queued_cmd *qc;
1702 
1703  /* Only one outstanding command per SFF channel */
1704  qc = ata_qc_from_tag(ap, ap->link.active_tag);
1705  /* We cannot lose an interrupt on a non-existent or polled command */
1706  if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1707  return;
1708  /* See if the controller thinks it is still busy - if so the command
1709  isn't a lost IRQ but is still in progress */
1710  status = ata_sff_altstatus(ap);
1711  if (status & ATA_BUSY)
1712  return;
1713 
1714  /* There was a command running, we are no longer busy and we have
1715  no interrupt. */
1716  ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
1717  status);
1718  /* Run the host interrupt logic as if the interrupt had not been
1719  lost */
1720  ata_sff_port_intr(ap, qc);
1721 }
1723 
1733 void ata_sff_freeze(struct ata_port *ap)
1734 {
1735  ap->ctl |= ATA_NIEN;
1736  ap->last_ctl = ap->ctl;
1737 
1738  if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1739  ata_sff_set_devctl(ap, ap->ctl);
1740 
1741  /* Under certain circumstances, some controllers raise IRQ on
1742  * ATA_NIEN manipulation. Also, many controllers fail to mask
1743  * previously pending IRQ on ATA_NIEN assertion. Clear it.
1744  */
1745  ap->ops->sff_check_status(ap);
1746 
1747  if (ap->ops->sff_irq_clear)
1748  ap->ops->sff_irq_clear(ap);
1749 }
1751 
1761 void ata_sff_thaw(struct ata_port *ap)
1762 {
1763  /* clear & re-enable interrupts */
1764  ap->ops->sff_check_status(ap);
1765  if (ap->ops->sff_irq_clear)
1766  ap->ops->sff_irq_clear(ap);
1767  ata_sff_irq_on(ap);
1768 }
1770 
1786 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1787 {
1788  struct ata_eh_context *ehc = &link->eh_context;
1789  int rc;
1790 
1791  rc = ata_std_prereset(link, deadline);
1792  if (rc)
1793  return rc;
1794 
1795  /* if we're about to do hardreset, nothing more to do */
1796  if (ehc->i.action & ATA_EH_HARDRESET)
1797  return 0;
1798 
1799  /* wait for !BSY if we don't know that no device is attached */
1800  if (!ata_link_offline(link)) {
1801  rc = ata_sff_wait_ready(link, deadline);
1802  if (rc && rc != -ENODEV) {
1803  ata_link_warn(link,
1804  "device not ready (errno=%d), forcing hardreset\n",
1805  rc);
1806  ehc->i.action |= ATA_EH_HARDRESET;
1807  }
1808  }
1809 
1810  return 0;
1811 }
1813 
1831 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1832 {
1833  struct ata_ioports *ioaddr = &ap->ioaddr;
1834  u8 nsect, lbal;
1835 
1836  ap->ops->sff_dev_select(ap, device);
1837 
1838  iowrite8(0x55, ioaddr->nsect_addr);
1839  iowrite8(0xaa, ioaddr->lbal_addr);
1840 
1841  iowrite8(0xaa, ioaddr->nsect_addr);
1842  iowrite8(0x55, ioaddr->lbal_addr);
1843 
1844  iowrite8(0x55, ioaddr->nsect_addr);
1845  iowrite8(0xaa, ioaddr->lbal_addr);
1846 
1847  nsect = ioread8(ioaddr->nsect_addr);
1848  lbal = ioread8(ioaddr->lbal_addr);
1849 
1850  if ((nsect == 0x55) && (lbal == 0xaa))
1851  return 1; /* we found a device */
1852 
1853  return 0; /* nothing found */
1854 }
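/*
 * Note on the test above: 0x55/0xaa are alternating bit patterns written to
 * the scratch nsect/lbal registers; only a device that actually latches
 * writes will read back the last pair written (0x55/0xaa), so anything else
 * is treated as "no device".
 */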
1855 
1877 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1878  u8 *r_err)
1879 {
1880  struct ata_port *ap = dev->link->ap;
1881  struct ata_taskfile tf;
1882  unsigned int class;
1883  u8 err;
1884 
1885  ap->ops->sff_dev_select(ap, dev->devno);
1886 
1887  memset(&tf, 0, sizeof(tf));
1888 
1889  ap->ops->sff_tf_read(ap, &tf);
1890  err = tf.feature;
1891  if (r_err)
1892  *r_err = err;
1893 
1894  /* see if device passed diags: continue and warn later */
1895  if (err == 0)
1896  /* diagnostic fail : do nothing _YET_ */
1897  dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1898  else if (err == 1)
1899  /* do nothing */ ;
1900  else if ((dev->devno == 0) && (err == 0x81))
1901  /* do nothing */ ;
1902  else
1903  return ATA_DEV_NONE;
1904 
1905  /* determine if device is ATA or ATAPI */
1906  class = ata_dev_classify(&tf);
1907 
1908  if (class == ATA_DEV_UNKNOWN) {
1909  /* If the device failed diagnostic, it's likely to
1910  * have reported incorrect device signature too.
1911  * Assume ATA device if the device seems present but
1912  * device signature is invalid with diagnostic
1913  * failure.
1914  */
1915  if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1916  class = ATA_DEV_ATA;
1917  else
1918  class = ATA_DEV_NONE;
1919  } else if ((class == ATA_DEV_ATA) &&
1920  (ap->ops->sff_check_status(ap) == 0))
1921  class = ATA_DEV_NONE;
1922 
1923  return class;
1924 }
1926 
1944 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1945  unsigned long deadline)
1946 {
1947  struct ata_port *ap = link->ap;
1948  struct ata_ioports *ioaddr = &ap->ioaddr;
1949  unsigned int dev0 = devmask & (1 << 0);
1950  unsigned int dev1 = devmask & (1 << 1);
1951  int rc, ret = 0;
1952 
1953  ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1954 
1955  /* always check readiness of the master device */
1956  rc = ata_sff_wait_ready(link, deadline);
1957  /* -ENODEV means the odd clown forgot the D7 pulldown resistor
1958  * and TF status is 0xff, bail out on it too.
1959  */
1960  if (rc)
1961  return rc;
1962 
1963  /* if device 1 was found in ata_devchk, wait for register
1964  * access briefly, then wait for BSY to clear.
1965  */
1966  if (dev1) {
1967  int i;
1968 
1969  ap->ops->sff_dev_select(ap, 1);
1970 
1971  /* Wait for register access. Some ATAPI devices fail
1972  * to set nsect/lbal after reset, so don't waste too
1973  * much time on it. We're gonna wait for !BSY anyway.
1974  */
1975  for (i = 0; i < 2; i++) {
1976  u8 nsect, lbal;
1977 
1978  nsect = ioread8(ioaddr->nsect_addr);
1979  lbal = ioread8(ioaddr->lbal_addr);
1980  if ((nsect == 1) && (lbal == 1))
1981  break;
1982  ata_msleep(ap, 50); /* give drive a breather */
1983  }
1984 
1985  rc = ata_sff_wait_ready(link, deadline);
1986  if (rc) {
1987  if (rc != -ENODEV)
1988  return rc;
1989  ret = rc;
1990  }
1991  }
1992 
1993  /* is all this really necessary? */
1994  ap->ops->sff_dev_select(ap, 0);
1995  if (dev1)
1996  ap->ops->sff_dev_select(ap, 1);
1997  if (dev0)
1998  ap->ops->sff_dev_select(ap, 0);
1999 
2000  return ret;
2001 }
2003 
2004 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
2005  unsigned long deadline)
2006 {
2007  struct ata_ioports *ioaddr = &ap->ioaddr;
2008 
2009  DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2010 
2011  /* software reset. causes dev0 to be selected */
2012  iowrite8(ap->ctl, ioaddr->ctl_addr);
2013  udelay(20); /* FIXME: flush */
2014  iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2015  udelay(20); /* FIXME: flush */
2016  iowrite8(ap->ctl, ioaddr->ctl_addr);
2017  ap->last_ctl = ap->ctl;
2018 
2019  /* wait the port to become ready */
2020  return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2021 }
2022 
2037 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2038  unsigned long deadline)
2039 {
2040  struct ata_port *ap = link->ap;
2041  unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2042  unsigned int devmask = 0;
2043  int rc;
2044  u8 err;
2045 
2046  DPRINTK("ENTER\n");
2047 
2048  /* determine if device 0/1 are present */
2049  if (ata_devchk(ap, 0))
2050  devmask |= (1 << 0);
2051  if (slave_possible && ata_devchk(ap, 1))
2052  devmask |= (1 << 1);
2053 
2054  /* select device 0 again */
2055  ap->ops->sff_dev_select(ap, 0);
2056 
2057  /* issue bus reset */
2058  DPRINTK("about to softreset, devmask=%x\n", devmask);
2059  rc = ata_bus_softreset(ap, devmask, deadline);
2060  /* if link is occupied, -ENODEV too is an error */
2061  if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2062  ata_link_err(link, "SRST failed (errno=%d)\n", rc);
2063  return rc;
2064  }
2065 
2066  /* determine by signature whether we have ATA or ATAPI devices */
2067  classes[0] = ata_sff_dev_classify(&link->device[0],
2068  devmask & (1 << 0), &err);
2069  if (slave_possible && err != 0x81)
2070  classes[1] = ata_sff_dev_classify(&link->device[1],
2071  devmask & (1 << 1), &err);
2072 
2073  DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2074  return 0;
2075 }
2077 
2093 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2094  unsigned long deadline)
2095 {
2096  struct ata_eh_context *ehc = &link->eh_context;
2097  const unsigned long *timing = sata_ehc_deb_timing(ehc);
2098  bool online;
2099  int rc;
2100 
2101  rc = sata_link_hardreset(link, timing, deadline, &online,
2102  ata_sff_check_ready);
2103  if (online)
2104  *class = ata_sff_dev_classify(link->device, 1, NULL);
2105 
2106  DPRINTK("EXIT, class=%u\n", *class);
2107  return rc;
2108 }
2110 
2123 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2124 {
2125  struct ata_port *ap = link->ap;
2126 
2127  ata_std_postreset(link, classes);
2128 
2129  /* is double-select really necessary? */
2130  if (classes[0] != ATA_DEV_NONE)
2131  ap->ops->sff_dev_select(ap, 1);
2132  if (classes[1] != ATA_DEV_NONE)
2133  ap->ops->sff_dev_select(ap, 0);
2134 
2135  /* bail out if no device is present */
2136  if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2137  DPRINTK("EXIT, no device\n");
2138  return;
2139  }
2140 
2141  /* set up device control */
2142  if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2143  ata_sff_set_devctl(ap, ap->ctl);
2144  ap->last_ctl = ap->ctl;
2145  }
2146 }
2148 
2159 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2160 {
2161  int count;
2162  struct ata_port *ap;
2163 
2164  /* We only need to flush incoming data when a command was running */
2165  if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2166  return;
2167 
2168  ap = qc->ap;
2169  /* Drain up to 64K of data before we give up this recovery method */
2170  for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2171  && count < 65536; count += 2)
2172  ioread16(ap->ioaddr.data_addr);
2173 
2174  /* Can become DEBUG later */
2175  if (count)
2176  ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2177 
2178 }
2180 
2193 void ata_sff_error_handler(struct ata_port *ap)
2194 {
2195  ata_reset_fn_t softreset = ap->ops->softreset;
2196  ata_reset_fn_t hardreset = ap->ops->hardreset;
2197  struct ata_queued_cmd *qc;
2198  unsigned long flags;
2199 
2200  qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2201  if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2202  qc = NULL;
2203 
2204  spin_lock_irqsave(ap->lock, flags);
2205 
2206  /*
2207  * We *MUST* do FIFO draining before we issue a reset as
2208  * several devices helpfully clear their internal state and
2209  * will lock solid if we touch the data port post reset. Pass
2210  * qc in case anyone wants to do different PIO/DMA recovery or
2211  * has per command fixups
2212  */
2213  if (ap->ops->sff_drain_fifo)
2214  ap->ops->sff_drain_fifo(qc);
2215 
2216  spin_unlock_irqrestore(ap->lock, flags);
2217 
2218  /* ignore ata_sff_softreset if ctl isn't accessible */
2219  if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2220  softreset = NULL;
2221 
2222  /* ignore built-in hardresets if SCR access is not available */
2223  if ((hardreset == sata_std_hardreset ||
2224  hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2225  hardreset = NULL;
2226 
2227  ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2228  ap->ops->postreset);
2229 }
2231 
2243 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2244 {
2245  ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2246  ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2247  ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2248  ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2249  ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2250  ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2251  ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2252  ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2253  ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2254  ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2255 }
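/*
 * Illustrative sketch (not part of this file): a memory-mapped SFF
 * controller only needs cmd_addr (and, if present, ctl_addr) filled in
 * before calling ata_sff_std_ports(); the remaining taskfile registers are
 * derived from cmd_addr. "cmd_base" and "ctl_base" are hypothetical
 * ioremapped addresses.
 */
ap->ioaddr.cmd_addr = cmd_base;
ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr = ctl_base;
ata_sff_std_ports(&ap->ioaddr);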
2257 
2258 #ifdef CONFIG_PCI
2259 
2260 static int ata_resources_present(struct pci_dev *pdev, int port)
2261 {
2262  int i;
2263 
2264  /* Check the PCI resources for this channel are enabled */
2265  port = port * 2;
2266  for (i = 0; i < 2; i++) {
2267  if (pci_resource_start(pdev, port + i) == 0 ||
2268  pci_resource_len(pdev, port + i) == 0)
2269  return 0;
2270  }
2271  return 1;
2272 }
2273 
2293 int ata_pci_sff_init_host(struct ata_host *host)
2294 {
2295  struct device *gdev = host->dev;
2296  struct pci_dev *pdev = to_pci_dev(gdev);
2297  unsigned int mask = 0;
2298  int i, rc;
2299 
2300  /* request, iomap BARs and init port addresses accordingly */
2301  for (i = 0; i < 2; i++) {
2302  struct ata_port *ap = host->ports[i];
2303  int base = i * 2;
2304  void __iomem * const *iomap;
2305 
2306  if (ata_port_is_dummy(ap))
2307  continue;
2308 
2309  /* Discard disabled ports. Some controllers show
2310  * their unused channels this way. Disabled ports are
2311  * made dummy.
2312  */
2313  if (!ata_resources_present(pdev, i)) {
2314  ap->ops = &ata_dummy_port_ops;
2315  continue;
2316  }
2317 
2318  rc = pcim_iomap_regions(pdev, 0x3 << base,
2319  dev_driver_string(gdev));
2320  if (rc) {
2321  dev_warn(gdev,
2322  "failed to request/iomap BARs for port %d (errno=%d)\n",
2323  i, rc);
2324  if (rc == -EBUSY)
2325  pcim_pin_device(pdev);
2326  ap->ops = &ata_dummy_port_ops;
2327  continue;
2328  }
2329  host->iomap = iomap = pcim_iomap_table(pdev);
2330 
2331  ap->ioaddr.cmd_addr = iomap[base];
2332  ap->ioaddr.altstatus_addr =
2333  ap->ioaddr.ctl_addr = (void __iomem *)
2334  ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2335  ata_sff_std_ports(&ap->ioaddr);
2336 
2337  ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2338  (unsigned long long)pci_resource_start(pdev, base),
2339  (unsigned long long)pci_resource_start(pdev, base + 1));
2340 
2341  mask |= 1 << i;
2342  }
2343 
2344  if (!mask) {
2345  dev_err(gdev, "no available native port\n");
2346  return -ENODEV;
2347  }
2348 
2349  return 0;
2350 }
2351 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2352 
2368 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2369  const struct ata_port_info * const *ppi,
2370  struct ata_host **r_host)
2371 {
2372  struct ata_host *host;
2373  int rc;
2374 
2375  if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2376  return -ENOMEM;
2377 
2378  host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2379  if (!host) {
2380  dev_err(&pdev->dev, "failed to allocate ATA host\n");
2381  rc = -ENOMEM;
2382  goto err_out;
2383  }
2384 
2385  rc = ata_pci_sff_init_host(host);
2386  if (rc)
2387  goto err_out;
2388 
2389  devres_remove_group(&pdev->dev, NULL);
2390  *r_host = host;
2391  return 0;
2392 
2393 err_out:
2394  devres_release_group(&pdev->dev, NULL);
2395  return rc;
2396 }
2397 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2398 
2415 int ata_pci_sff_activate_host(struct ata_host *host,
2416  irq_handler_t irq_handler,
2417  struct scsi_host_template *sht)
2418 {
2419  struct device *dev = host->dev;
2420  struct pci_dev *pdev = to_pci_dev(dev);
2421  const char *drv_name = dev_driver_string(host->dev);
2422  int legacy_mode = 0, rc;
2423 
2424  rc = ata_host_start(host);
2425  if (rc)
2426  return rc;
2427 
2428  if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2429  u8 tmp8, mask;
2430 
2431  /* TODO: What if one channel is in native mode ... */
2432  pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2433  mask = (1 << 2) | (1 << 0);
2434  if ((tmp8 & mask) != mask)
2435  legacy_mode = 1;
2436 #if defined(CONFIG_NO_ATA_LEGACY)
2437  /* Some platforms with PCI limits cannot address compat
2438  port space. In that case we punt if their firmware has
2439  left a device in compatibility mode */
2440  if (legacy_mode) {
2441  printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2442  return -EOPNOTSUPP;
2443  }
2444 #endif
2445  }
2446 
2447  if (!devres_open_group(dev, NULL, GFP_KERNEL))
2448  return -ENOMEM;
2449 
2450  if (!legacy_mode && pdev->irq) {
2451  int i;
2452 
2453  rc = devm_request_irq(dev, pdev->irq, irq_handler,
2454  IRQF_SHARED, drv_name, host);
2455  if (rc)
2456  goto out;
2457 
2458  for (i = 0; i < 2; i++) {
2459  if (ata_port_is_dummy(host->ports[i]))
2460  continue;
2461  ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2462  }
2463  } else if (legacy_mode) {
2464  if (!ata_port_is_dummy(host->ports[0])) {
2465  rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2466  irq_handler, IRQF_SHARED,
2467  drv_name, host);
2468  if (rc)
2469  goto out;
2470 
2471  ata_port_desc(host->ports[0], "irq %d",
2472  ATA_PRIMARY_IRQ(pdev));
2473  }
2474 
2475  if (!ata_port_is_dummy(host->ports[1])) {
2476  rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2477  irq_handler, IRQF_SHARED,
2478  drv_name, host);
2479  if (rc)
2480  goto out;
2481 
2482  ata_port_desc(host->ports[1], "irq %d",
2483  ATA_SECONDARY_IRQ(pdev));
2484  }
2485  }
2486 
2487  rc = ata_host_register(host, sht);
2488 out:
2489  if (rc == 0)
2490  devres_remove_group(dev, NULL);
2491  else
2492  devres_release_group(dev, NULL);
2493 
2494  return rc;
2495 }
2496 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2497 
2498 static const struct ata_port_info *ata_sff_find_valid_pi(
2499  const struct ata_port_info * const *ppi)
2500 {
2501  int i;
2502 
2503  /* look up the first valid port_info */
2504  for (i = 0; i < 2 && ppi[i]; i++)
2505  if (ppi[i]->port_ops != &ata_dummy_port_ops)
2506  return ppi[i];
2507 
2508  return NULL;
2509 }
2510 
2511 static int ata_pci_init_one(struct pci_dev *pdev,
2512  const struct ata_port_info * const *ppi,
2513  struct scsi_host_template *sht, void *host_priv,
2514  int hflags, bool bmdma)
2515 {
2516  struct device *dev = &pdev->dev;
2517  const struct ata_port_info *pi;
2518  struct ata_host *host = NULL;
2519  int rc;
2520 
2521  DPRINTK("ENTER\n");
2522 
2523  pi = ata_sff_find_valid_pi(ppi);
2524  if (!pi) {
2525  dev_err(&pdev->dev, "no valid port_info specified\n");
2526  return -EINVAL;
2527  }
2528 
2529  if (!devres_open_group(dev, NULL, GFP_KERNEL))
2530  return -ENOMEM;
2531 
2532  rc = pcim_enable_device(pdev);
2533  if (rc)
2534  goto out;
2535 
2536 #ifdef CONFIG_ATA_BMDMA
2537  if (bmdma)
2538  /* prepare and activate BMDMA host */
2539  rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2540  else
2541 #endif
2542  /* prepare and activate SFF host */
2543  rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2544  if (rc)
2545  goto out;
2546  host->private_data = host_priv;
2547  host->flags |= hflags;
2548 
2549 #ifdef CONFIG_ATA_BMDMA
2550  if (bmdma) {
2551  pci_set_master(pdev);
2552  rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2553  } else
2554 #endif
2555  rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2556 out:
2557  if (rc == 0)
2558  devres_remove_group(&pdev->dev, NULL);
2559  else
2560  devres_release_group(&pdev->dev, NULL);
2561 
2562  return rc;
2563 }
2564 
2587 int ata_pci_sff_init_one(struct pci_dev *pdev,
2588  const struct ata_port_info * const *ppi,
2589  struct scsi_host_template *sht, void *host_priv, int hflag)
2590 {
2591  return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2592 }
2593 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
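/*
 * Illustrative sketch (not part of this file): a minimal PCI PATA probe can
 * delegate everything to ata_pci_sff_init_one(). "example_sht" and the
 * port_info contents below are hypothetical.
 */
static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &ata_sff_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL, 0);
}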
2594 
2595 #endif /* CONFIG_PCI */
2596 
2597 /*
2598  * BMDMA support
2599  */
2600 
2601 #ifdef CONFIG_ATA_BMDMA
2602 
2603 const struct ata_port_operations ata_bmdma_port_ops = {
2604  .inherits = &ata_sff_port_ops,
2605 
2606  .error_handler = ata_bmdma_error_handler,
2607  .post_internal_cmd = ata_bmdma_post_internal_cmd,
2608 
2609  .qc_prep = ata_bmdma_qc_prep,
2610  .qc_issue = ata_bmdma_qc_issue,
2611 
2612  .sff_irq_clear = ata_bmdma_irq_clear,
2613  .bmdma_setup = ata_bmdma_setup,
2614  .bmdma_start = ata_bmdma_start,
2615  .bmdma_stop = ata_bmdma_stop,
2616  .bmdma_status = ata_bmdma_status,
2617 
2618  .port_start = ata_bmdma_port_start,
2619 };
2620 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2621 
2622 const struct ata_port_operations ata_bmdma32_port_ops = {
2623  .inherits = &ata_bmdma_port_ops,
2624 
2625  .sff_data_xfer = ata_sff_data_xfer32,
2626  .port_start = ata_bmdma_port_start32,
2627 };
2628 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2629 
2641 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2642 {
2643  struct ata_port *ap = qc->ap;
2644  struct ata_bmdma_prd *prd = ap->bmdma_prd;
2645  struct scatterlist *sg;
2646  unsigned int si, pi;
2647 
2648  pi = 0;
2649  for_each_sg(qc->sg, sg, qc->n_elem, si) {
2650  u32 addr, offset;
2651  u32 sg_len, len;
2652 
2653  /* determine if physical DMA addr spans 64K boundary.
2654  * Note h/w doesn't support 64-bit, so we unconditionally
2655  * truncate dma_addr_t to u32.
2656  */
2657  addr = (u32) sg_dma_address(sg);
2658  sg_len = sg_dma_len(sg);
2659 
2660  while (sg_len) {
2661  offset = addr & 0xffff;
2662  len = sg_len;
2663  if ((offset + sg_len) > 0x10000)
2664  len = 0x10000 - offset;
2665 
2666  prd[pi].addr = cpu_to_le32(addr);
2667  prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2668  VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2669 
2670  pi++;
2671  sg_len -= len;
2672  addr += len;
2673  }
2674  }
2675 
2676  prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2677 }
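/*
 * Worked example for the 64K-boundary split above: an sg entry with
 * addr = 0xFFF0 and sg_len = 0x20 has offset 0xFFF0, so offset + sg_len
 * (0x10010) exceeds 0x10000 and the entry is emitted as two PRDs:
 * (0xFFF0, 0x10) and (0x10000, 0x10).
 */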
2678 
2692 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2693 {
2694  struct ata_port *ap = qc->ap;
2695  struct ata_bmdma_prd *prd = ap->bmdma_prd;
2696  struct scatterlist *sg;
2697  unsigned int si, pi;
2698 
2699  pi = 0;
2700  for_each_sg(qc->sg, sg, qc->n_elem, si) {
2701  u32 addr, offset;
2702  u32 sg_len, len, blen;
2703 
2704  /* determine if physical DMA addr spans 64K boundary.
2705  * Note h/w doesn't support 64-bit, so we unconditionally
2706  * truncate dma_addr_t to u32.
2707  */
2708  addr = (u32) sg_dma_address(sg);
2709  sg_len = sg_dma_len(sg);
2710 
2711  while (sg_len) {
2712  offset = addr & 0xffff;
2713  len = sg_len;
2714  if ((offset + sg_len) > 0x10000)
2715  len = 0x10000 - offset;
2716 
2717  blen = len & 0xffff;
2718  prd[pi].addr = cpu_to_le32(addr);
2719  if (blen == 0) {
2720  /* Some PATA chipsets like the CS5530 can't
2721  cope with 0x0000 meaning 64K as the spec
2722  says */
2723  prd[pi].flags_len = cpu_to_le32(0x8000);
2724  blen = 0x8000;
2725  prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2726  }
2727  prd[pi].flags_len = cpu_to_le32(blen);
2728  VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2729 
2730  pi++;
2731  sg_len -= len;
2732  addr += len;
2733  }
2734  }
2735 
2736  prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2737 }
2738 
2748 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2749 {
2750  if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2751  return;
2752 
2753  ata_bmdma_fill_sg(qc);
2754 }
2755 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2756 
2766 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2767 {
2768  if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2769  return;
2770 
2771  ata_bmdma_fill_sg_dumb(qc);
2772 }
2773 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2774 
2789 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2790 {
2791  struct ata_port *ap = qc->ap;
2792  struct ata_link *link = qc->dev->link;
2793 
2794  /* defer PIO handling to sff_qc_issue */
2795  if (!ata_is_dma(qc->tf.protocol))
2796  return ata_sff_qc_issue(qc);
2797 
2798  /* select the device */
2799  ata_dev_select(ap, qc->dev->devno, 1, 0);
2800 
2801  /* start the command */
2802  switch (qc->tf.protocol) {
2803  case ATA_PROT_DMA:
2804  WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2805 
2806  ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2807  ap->ops->bmdma_setup(qc); /* set up bmdma */
2808  ap->ops->bmdma_start(qc); /* initiate bmdma */
2809  ap->hsm_task_state = HSM_ST_LAST;
2810  break;
2811 
2812  case ATAPI_PROT_DMA:
2813  WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2814 
2815  ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2816  ap->ops->bmdma_setup(qc); /* set up bmdma */
2817  ap->hsm_task_state = HSM_ST_FIRST;
2818 
2819  /* send cdb by polling if no cdb interrupt */
2820  if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2821  ata_sff_queue_pio_task(link, 0);
2822  break;
2823 
2824  default:
2825  WARN_ON(1);
2826  return AC_ERR_SYSTEM;
2827  }
2828 
2829  return 0;
2830 }
2831 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2832 
2846 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2847 {
2848  struct ata_eh_info *ehi = &ap->link.eh_info;
2849  u8 host_stat = 0;
2850  bool bmdma_stopped = false;
2851  unsigned int handled;
2852 
2853  if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2854  /* check status of DMA engine */
2855  host_stat = ap->ops->bmdma_status(ap);
2856  VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2857 
2858  /* if it's not our irq... */
2859  if (!(host_stat & ATA_DMA_INTR))
2860  return ata_sff_idle_irq(ap);
2861 
2862  /* before we do anything else, clear DMA-Start bit */
2863  ap->ops->bmdma_stop(qc);
2864  bmdma_stopped = true;
2865 
2866  if (unlikely(host_stat & ATA_DMA_ERR)) {
2867  /* error when transferring data to/from memory */
2868  qc->err_mask |= AC_ERR_HOST_BUS;
2869  ap->hsm_task_state = HSM_ST_ERR;
2870  }
2871  }
2872 
2873  handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2874 
2875  if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2876  ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2877 
2878  return handled;
2879 }
2880 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2881 
2896 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2897 {
2898  return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2899 }
2900 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
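/*
 * Usage sketch (hypothetical helper, not part of libata-sff.c): drivers
 * that prepare their host manually rather than going through
 * ata_pci_bmdma_init_one() pass this handler to ata_host_activate()
 * themselves.  example_activate is a placeholder name; ata_host_activate()
 * and IRQF_SHARED are real.
 */
#include <linux/interrupt.h>
#include <linux/libata.h>

static int example_activate(struct ata_host *host, int irq,
			    struct scsi_host_template *sht)
{
	return ata_host_activate(host, irq, ata_bmdma_interrupt,
				 IRQF_SHARED, sht);
}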
2901 
2914 void ata_bmdma_error_handler(struct ata_port *ap)
2915 {
2916  struct ata_queued_cmd *qc;
2917  unsigned long flags;
2918  bool thaw = false;
2919 
2920  qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2921  if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2922  qc = NULL;
2923 
2924  /* reset PIO HSM and stop DMA engine */
2925  spin_lock_irqsave(ap->lock, flags);
2926 
2927  if (qc && ata_is_dma(qc->tf.protocol)) {
2928  u8 host_stat;
2929 
2930  host_stat = ap->ops->bmdma_status(ap);
2931 
2932  /* BMDMA controllers indicate host bus error by
2933  * setting DMA_ERR bit and timing out. As it wasn't
2934  * really a timeout event, adjust error mask and
2935  * cancel frozen state.
2936  */
2937  if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2938  qc->err_mask = AC_ERR_HOST_BUS;
2939  thaw = true;
2940  }
2941 
2942  ap->ops->bmdma_stop(qc);
2943 
2944  /* if we're going to thaw, make sure IRQ is clear */
2945  if (thaw) {
2946  ap->ops->sff_check_status(ap);
2947  if (ap->ops->sff_irq_clear)
2948  ap->ops->sff_irq_clear(ap);
2949  }
2950  }
2951 
2952  spin_unlock_irqrestore(ap->lock, flags);
2953 
2954  if (thaw)
2955  ata_eh_thaw_port(ap);
2956 
2957  ata_sff_error_handler(ap);
2958 }
2959 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2960 
2968 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2969 {
2970  struct ata_port *ap = qc->ap;
2971  unsigned long flags;
2972 
2973  if (ata_is_dma(qc->tf.protocol)) {
2974  spin_lock_irqsave(ap->lock, flags);
2975  ap->ops->bmdma_stop(qc);
2976  spin_unlock_irqrestore(ap->lock, flags);
2977  }
2978 }
2979 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2980 
2992 void ata_bmdma_irq_clear(struct ata_port *ap)
2993 {
2994  void __iomem *mmio = ap->ioaddr.bmdma_addr;
2995 
2996  if (!mmio)
2997  return;
2998 
2999  iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
3000 }
3001 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
3002 
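/*
 * Reference sketch (offsets taken from <linux/ata.h>): the helpers above
 * and below program a standard SFF-8038i style BMDMA register block, one
 * 8-byte block per channel (see ata_pci_bmdma_init() further down):
 *
 *   base + ATA_DMA_CMD       (0)  command:  ATA_DMA_START, ATA_DMA_WR
 *   base + ATA_DMA_STATUS    (2)  status:   ATA_DMA_ACTIVE, ATA_DMA_ERR,
 *                                           ATA_DMA_INTR (write 1 to clear)
 *   base + ATA_DMA_TABLE_OFS (4)  32-bit physical address of the PRD table
 */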
3010 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3011 {
3012  struct ata_port *ap = qc->ap;
3013  unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3014  u8 dmactl;
3015 
3016  /* load PRD table addr. */
3017  mb(); /* make sure PRD table writes are visible to controller */
3018  iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3019 
3020  /* specify data direction, triple-check start bit is clear */
3021  dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3022  dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3023  if (!rw)
3024  dmactl |= ATA_DMA_WR;
3025  iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3026 
3027  /* issue r/w command */
3028  ap->ops->sff_exec_command(ap, &qc->tf);
3029 }
3030 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3031 
3039 void ata_bmdma_start(struct ata_queued_cmd *qc)
3040 {
3041  struct ata_port *ap = qc->ap;
3042  u8 dmactl;
3043 
3044  /* start host DMA transaction */
3045  dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3046  iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3047 
3048  /* Strictly, one may wish to issue an ioread8() here, to
3049  * flush the mmio write. However, control also passes
3050  * to the hardware at this point, and it will interrupt
3051  * us when we are to resume control. So, in effect,
3052  * we don't care when the mmio write flushes.
3053  * Further, a read of the DMA status register _immediately_
3054  * following the write may not be what certain flaky hardware
3055  * expects, so I think it is best to not add a readb()
3056  * without first testing all the MMIO ATA cards/mobos.
3057  * Or maybe I'm just being paranoid.
3058  *
3059  * FIXME: The posting of this write means I/O starts are
3060  * unnecessarily delayed for MMIO
3061  */
3062 }
3063 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3064 
3076 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3077 {
3078  struct ata_port *ap = qc->ap;
3079  void __iomem *mmio = ap->ioaddr.bmdma_addr;
3080 
3081  /* clear start/stop bit */
3082  iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3083  mmio + ATA_DMA_CMD);
3084 
3085  /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3086  ata_sff_dma_pause(ap);
3087 }
3088 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3089 
3101 u8 ata_bmdma_status(struct ata_port *ap)
3102 {
3103  return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3104 }
3105 EXPORT_SYMBOL_GPL(ata_bmdma_status);
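/*
 * Usage sketch (hypothetical quirk handling, not part of libata-sff.c):
 * a driver whose controller needs the status bits cleared after every
 * transfer could wrap the generic helper and plug the wrapper into its
 * port ops.  example_bmdma_stop is a placeholder name; the write-back of
 * ATA_DMA_STATUS mirrors what ata_bmdma_irq_clear() does.
 */
#include <linux/libata.h>

static void example_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_bmdma_stop(qc);		/* generic stop + one-PIO-cycle pause */

	/* write the INTR/ERR bits back to clear them (assumed quirk) */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS),
		 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}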
3106 
3107 
3120 int ata_bmdma_port_start(struct ata_port *ap)
3121 {
3122  if (ap->mwdma_mask || ap->udma_mask) {
3123  ap->bmdma_prd =
3124  dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3125  &ap->bmdma_prd_dma, GFP_KERNEL);
3126  if (!ap->bmdma_prd)
3127  return -ENOMEM;
3128  }
3129 
3130  return 0;
3131 }
3132 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3133 
3148 int ata_bmdma_port_start32(struct ata_port *ap)
3149 {
3150  ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3151  return ata_bmdma_port_start(ap);
3152 }
3153 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3154 
3155 #ifdef CONFIG_PCI
3156 
3166 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3167 {
3168  unsigned long bmdma = pci_resource_start(pdev, 4);
3169  u8 simplex;
3170 
3171  if (bmdma == 0)
3172  return -ENOENT;
3173 
3174  simplex = inb(bmdma + 0x02);
3175  outb(simplex & 0x60, bmdma + 0x02);
3176  simplex = inb(bmdma + 0x02);
3177  if (simplex & 0x80)
3178  return -EOPNOTSUPP;
3179  return 0;
3180 }
3181 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
3182 
3183 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3184 {
3185  int i;
3186 
3187  dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3188 
3189  for (i = 0; i < 2; i++) {
3190  host->ports[i]->mwdma_mask = 0;
3191  host->ports[i]->udma_mask = 0;
3192  }
3193 }
3194 
3204 void ata_pci_bmdma_init(struct ata_host *host)
3205 {
3206  struct device *gdev = host->dev;
3207  struct pci_dev *pdev = to_pci_dev(gdev);
3208  int i, rc;
3209 
3210  /* No BAR4 allocation: No DMA */
3211  if (pci_resource_start(pdev, 4) == 0) {
3212  ata_bmdma_nodma(host, "BAR4 is zero");
3213  return;
3214  }
3215 
3216  /*
3217  * Some controllers require BMDMA region to be initialized
3218  * even if DMA is not in use to clear IRQ status via
3219  * ->sff_irq_clear method. Try to initialize bmdma_addr
3220  * regardless of dma masks.
3221  */
3222  rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3223  if (rc)
3224  ata_bmdma_nodma(host, "failed to set dma mask");
3225  if (!rc) {
3226  rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3227  if (rc)
3228  ata_bmdma_nodma(host,
3229  "failed to set consistent dma mask");
3230  }
3231 
3232  /* request and iomap DMA region */
3233  rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3234  if (rc) {
3235  ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3236  return;
3237  }
3238  host->iomap = pcim_iomap_table(pdev);
3239 
3240  for (i = 0; i < 2; i++) {
3241  struct ata_port *ap = host->ports[i];
3242  void __iomem *bmdma = host->iomap[4] + 8 * i;
3243 
3244  if (ata_port_is_dummy(ap))
3245  continue;
3246 
3247  ap->ioaddr.bmdma_addr = bmdma;
3248  if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3249  (ioread8(bmdma + 2) & 0x80))
3250  host->flags |= ATA_HOST_SIMPLEX;
3251 
3252  ata_port_desc(ap, "bmdma 0x%llx",
3253  (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3254  }
3255 }
3256 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3257 
3273 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3274  const struct ata_port_info * const * ppi,
3275  struct ata_host **r_host)
3276 {
3277  int rc;
3278 
3279  rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3280  if (rc)
3281  return rc;
3282 
3283  ata_pci_bmdma_init(*r_host);
3284  return 0;
3285 }
3286 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3287 
3305 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3306  const struct ata_port_info * const * ppi,
3307  struct scsi_host_template *sht, void *host_priv,
3308  int hflags)
3309 {
3310  return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3311 }
3312 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
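/*
 * Usage sketch (hypothetical driver, not part of libata-sff.c): a complete
 * minimal BMDMA PCI driver skeleton built on ata_pci_bmdma_init_one().
 * The example_* identifiers and the empty ID table are placeholders; the
 * libata macros, transfer-mode masks and helpers are real.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template example_bmdma_sht = {
	ATA_BMDMA_SHT("pata_bmdma_example"),
};

static int example_bmdma_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_bmdma_init_one(pdev, ppi, &example_bmdma_sht, NULL, 0);
}

static const struct pci_device_id example_bmdma_ids[] = {
	/* real vendor/device IDs would go here */
	{ },
};

static struct pci_driver example_bmdma_driver = {
	.name		= "pata_bmdma_example",
	.id_table	= example_bmdma_ids,
	.probe		= example_bmdma_init_one,
	.remove		= ata_pci_remove_one,	/* generic libata teardown */
};

module_pci_driver(example_bmdma_driver);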
3313 
3314 #endif /* CONFIG_PCI */
3315 #endif /* CONFIG_ATA_BMDMA */
3316 
3327 void ata_sff_port_init(struct ata_port *ap)
3328 {
3329  INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3330  ap->ctl = ATA_DEVCTL_OBS;
3331  ap->last_ctl = 0xFF;
3332 }
3333 
3334 int __init ata_sff_init(void)
3335 {
3336  ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3337  if (!ata_sff_wq)
3338  return -ENOMEM;
3339 
3340  return 0;
3341 }
3342 
3343 void ata_sff_exit(void)
3344 {
3345  destroy_workqueue(ata_sff_wq);
3346 }