libata-core.c (Linux Kernel 3.7.1)

1 /*
2  * libata-core.c - helper library for ATA
3  *
4  * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5  * Please ALWAYS copy linux-ide@vger.kernel.org
6  * on emails.
7  *
8  * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9  * Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; see the file COPYING. If not, write to
24  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  * libata documentation is available via 'make {ps|pdf}docs',
28  * as Documentation/DocBook/libata.*
29  *
30  * Hardware documentation available from http://www.t13.org/ and
31  * http://www.sata-io.org/
32  *
33  * Standards documents from:
34  * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  * http://www.sata-io.org (SATA)
37  * http://www.compactflash.org (CF)
38  * http://www.qic.org (QIC157 - Tape and DSC)
39  * http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69 #include <linux/pm_runtime.h>
70 
71 #include "libata.h"
72 #include "libata-transport.h"
73 
74 /* debounce timing parameters in msecs { interval, duration, timeout } */
75 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
76 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
77 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
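/*
 * Editorial note, a hedged sketch of how these triplets are consumed
 * (the actual consumer is sata_link_debounce(), not shown in this
 * excerpt): SStatus DET is polled roughly every params[0] ms and must
 * read back stable for params[1] ms; if it has not settled within
 * params[2] ms the wait fails.
 *
 *	const unsigned long *params = sata_deb_timing_normal;
 *	params[0]	poll interval (ms)
 *	params[1]	required stable duration (ms)
 *	params[2]	overall timeout (ms)
 */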
78 
79 struct ata_port_operations ata_base_port_ops = {
80  .prereset = ata_std_prereset,
81  .postreset = ata_std_postreset,
82  .error_handler = ata_std_error_handler,
83  .sched_eh = ata_std_sched_eh,
84  .end_eh = ata_std_end_eh,
85 };
86 
87 const struct ata_port_operations sata_port_ops = {
88  .inherits = &ata_base_port_ops,
89 
90  .qc_defer = ata_std_qc_defer,
91  .hardreset = sata_std_hardreset,
92 };
93 
94 static unsigned int ata_dev_init_params(struct ata_device *dev,
95  u16 heads, u16 sectors);
96 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
97 static void ata_dev_xfermask(struct ata_device *dev);
98 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
99 
100 unsigned int ata_print_id = 1;
101 
102 struct ata_force_param {
103  const char *name;
104  unsigned int cbl;
105  int spd_limit;
106  unsigned long xfer_mask;
107  unsigned int horkage_on;
108  unsigned int horkage_off;
109  unsigned int lflags;
110 };
111 
112 struct ata_force_ent {
113  int port;
114  int device;
115  struct ata_force_param param;
116 };
117 
118 static struct ata_force_ent *ata_force_tbl;
119 static int ata_force_tbl_size;
120 
121 static char ata_force_param_buf[PAGE_SIZE] __initdata;
122 /* param_buf is thrown away after initialization, disallow read */
123 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
124 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
125 
126 static int atapi_enabled = 1;
127 module_param(atapi_enabled, int, 0444);
128 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
129 
130 static int atapi_dmadir = 0;
131 module_param(atapi_dmadir, int, 0444);
132 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
133 
134 static int atapi_passthru16 = 1;
135 module_param(atapi_passthru16, int, 0444);
136 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
137 
138 int libata_fua = 0;
139 module_param_named(fua, libata_fua, int, 0444);
140 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
141 
142 static int ata_ignore_hpa;
143 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
144 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
145 
146 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
147 module_param_named(dma, libata_dma_mask, int, 0444);
148 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
149 
150 static int ata_probe_timeout;
151 module_param(ata_probe_timeout, int, 0444);
152 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
153 
154 int libata_noacpi = 0;
155 module_param_named(noacpi, libata_noacpi, int, 0444);
156 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
157 
158 int libata_allow_tpm = 0;
159 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
160 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
161 
162 static int atapi_an;
163 module_param(atapi_an, int, 0444);
164 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
165 
166 MODULE_AUTHOR("Jeff Garzik");
167 MODULE_DESCRIPTION("Library module for ATA devices");
168 MODULE_LICENSE("GPL");
169 MODULE_VERSION(DRV_VERSION);
170 
171 
172 static bool ata_sstatus_online(u32 sstatus)
173 {
174  return (sstatus & 0xf) == 0x3;
175 }
176 
189 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
190  enum ata_link_iter_mode mode)
191 {
192  BUG_ON(mode != ATA_LITER_EDGE &&
193  mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
194 
195  /* NULL link indicates start of iteration */
196  if (!link)
197  switch (mode) {
198  case ATA_LITER_EDGE:
199  case ATA_LITER_PMP_FIRST:
200  if (sata_pmp_attached(ap))
201  return ap->pmp_link;
202  /* fall through */
203  case ATA_LITER_HOST_FIRST:
204  return &ap->link;
205  }
206 
207  /* we just iterated over the host link, what's next? */
208  if (link == &ap->link)
209  switch (mode) {
210  case ATA_LITER_HOST_FIRST:
211  if (sata_pmp_attached(ap))
212  return ap->pmp_link;
213  /* fall through */
214  case ATA_LITER_PMP_FIRST:
215  if (unlikely(ap->slave_link))
216  return ap->slave_link;
217  /* fall through */
218  case ATA_LITER_EDGE:
219  return NULL;
220  }
221 
222  /* slave_link excludes PMP */
223  if (unlikely(link == ap->slave_link))
224  return NULL;
225 
226  /* we were over a PMP link */
227  if (++link < ap->pmp_link + ap->nr_pmp_links)
228  return link;
229 
230  if (mode == ATA_LITER_PMP_FIRST)
231  return &ap->link;
232 
233  return NULL;
234 }
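/*
 * Editorial sketch (not part of the original file): callers normally
 * reach ata_link_next() through the ata_for_each_link() wrapper from
 * <linux/libata.h>, which re-invokes it until it returns NULL. The
 * helper name below is hypothetical.
 */
static void __maybe_unused example_dump_links(struct ata_port *ap)
{
	struct ata_link *link;

	/* EDGE mode: PMP links if a PMP is attached, else the host link */
	ata_for_each_link(link, ap, EDGE)
		ata_link_info(link, "link pmp=%d\n", link->pmp);
}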
235 
248 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
249  enum ata_dev_iter_mode mode)
250 {
251  BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
252  mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
253 
254  /* NULL dev indicates start of iteration */
255  if (!dev)
256  switch (mode) {
257  case ATA_DITER_ENABLED:
258  case ATA_DITER_ALL:
259  dev = link->device;
260  goto check;
261  case ATA_DITER_ENABLED_REVERSE:
262  case ATA_DITER_ALL_REVERSE:
263  dev = link->device + ata_link_max_devices(link) - 1;
264  goto check;
265  }
266 
267  next:
268  /* move to the next one */
269  switch (mode) {
270  case ATA_DITER_ENABLED:
271  case ATA_DITER_ALL:
272  if (++dev < link->device + ata_link_max_devices(link))
273  goto check;
274  return NULL;
275  case ATA_DITER_ENABLED_REVERSE:
276  case ATA_DITER_ALL_REVERSE:
277  if (--dev >= link->device)
278  goto check;
279  return NULL;
280  }
281 
282  check:
283  if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
284  !ata_dev_enabled(dev))
285  goto next;
286  return dev;
287 }
288 
303 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
304 {
305  struct ata_port *ap = dev->link->ap;
306 
307  if (!ap->slave_link)
308  return dev->link;
309  if (!dev->devno)
310  return &ap->link;
311  return ap->slave_link;
312 }
313 
327 void ata_force_cbl(struct ata_port *ap)
328 {
329  int i;
330 
331  for (i = ata_force_tbl_size - 1; i >= 0; i--) {
332  const struct ata_force_ent *fe = &ata_force_tbl[i];
333 
334  if (fe->port != -1 && fe->port != ap->print_id)
335  continue;
336 
337  if (fe->param.cbl == ATA_CBL_NONE)
338  continue;
339 
340  ap->cbl = fe->param.cbl;
341  ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
342  return;
343  }
344 }
345 
362 static void ata_force_link_limits(struct ata_link *link)
363 {
364  bool did_spd = false;
365  int linkno = link->pmp;
366  int i;
367 
368  if (ata_is_host_link(link))
369  linkno += 15;
370 
371  for (i = ata_force_tbl_size - 1; i >= 0; i--) {
372  const struct ata_force_ent *fe = &ata_force_tbl[i];
373 
374  if (fe->port != -1 && fe->port != link->ap->print_id)
375  continue;
376 
377  if (fe->device != -1 && fe->device != linkno)
378  continue;
379 
380  /* only honor the first spd limit */
381  if (!did_spd && fe->param.spd_limit) {
382  link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
383  ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
384  fe->param.name);
385  did_spd = true;
386  }
387 
388  /* let lflags stack */
389  if (fe->param.lflags) {
390  link->flags |= fe->param.lflags;
391  ata_link_notice(link,
392  "FORCE: link flag 0x%x forced -> 0x%x\n",
393  fe->param.lflags, link->flags);
394  }
395  }
396 }
397 
409 static void ata_force_xfermask(struct ata_device *dev)
410 {
411  int devno = dev->link->pmp + dev->devno;
412  int alt_devno = devno;
413  int i;
414 
415  /* allow n.15/16 for devices attached to host port */
416  if (ata_is_host_link(dev->link))
417  alt_devno += 15;
418 
419  for (i = ata_force_tbl_size - 1; i >= 0; i--) {
420  const struct ata_force_ent *fe = &ata_force_tbl[i];
421  unsigned long pio_mask, mwdma_mask, udma_mask;
422 
423  if (fe->port != -1 && fe->port != dev->link->ap->print_id)
424  continue;
425 
426  if (fe->device != -1 && fe->device != devno &&
427  fe->device != alt_devno)
428  continue;
429 
430  if (!fe->param.xfer_mask)
431  continue;
432 
433  ata_unpack_xfermask(fe->param.xfer_mask,
434  &pio_mask, &mwdma_mask, &udma_mask);
435  if (udma_mask)
436  dev->udma_mask = udma_mask;
437  else if (mwdma_mask) {
438  dev->udma_mask = 0;
439  dev->mwdma_mask = mwdma_mask;
440  } else {
441  dev->udma_mask = 0;
442  dev->mwdma_mask = 0;
443  dev->pio_mask = pio_mask;
444  }
445 
446  ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
447  fe->param.name);
448  return;
449  }
450 }
451 
463 static void ata_force_horkage(struct ata_device *dev)
464 {
465  int devno = dev->link->pmp + dev->devno;
466  int alt_devno = devno;
467  int i;
468 
469  /* allow n.15/16 for devices attached to host port */
470  if (ata_is_host_link(dev->link))
471  alt_devno += 15;
472 
473  for (i = 0; i < ata_force_tbl_size; i++) {
474  const struct ata_force_ent *fe = &ata_force_tbl[i];
475 
476  if (fe->port != -1 && fe->port != dev->link->ap->print_id)
477  continue;
478 
479  if (fe->device != -1 && fe->device != devno &&
480  fe->device != alt_devno)
481  continue;
482 
483  if (!(~dev->horkage & fe->param.horkage_on) &&
484  !(dev->horkage & fe->param.horkage_off))
485  continue;
486 
487  dev->horkage |= fe->param.horkage_on;
488  dev->horkage &= ~fe->param.horkage_off;
489 
490  ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
491  fe->param.name);
492  }
493 }
494 
507 int atapi_cmd_type(u8 opcode)
508 {
509  switch (opcode) {
510  case GPCMD_READ_10:
511  case GPCMD_READ_12:
512  return ATAPI_READ;
513 
514  case GPCMD_WRITE_10:
515  case GPCMD_WRITE_12:
516  case GPCMD_WRITE_AND_VERIFY_10:
517  return ATAPI_WRITE;
518 
519  case GPCMD_READ_CD:
520  case GPCMD_READ_CD_MSF:
521  return ATAPI_READ_CD;
522 
523  case ATA_16:
524  case ATA_12:
525  if (atapi_passthru16)
526  return ATAPI_PASS_THRU;
527  /* fall thru */
528  default:
529  return ATAPI_MISC;
530  }
531 }
532 
546 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
547 {
548  fis[0] = 0x27; /* Register - Host to Device FIS */
549  fis[1] = pmp & 0xf; /* Port multiplier number */
550  if (is_cmd)
551  fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
552 
553  fis[2] = tf->command;
554  fis[3] = tf->feature;
555 
556  fis[4] = tf->lbal;
557  fis[5] = tf->lbam;
558  fis[6] = tf->lbah;
559  fis[7] = tf->device;
560 
561  fis[8] = tf->hob_lbal;
562  fis[9] = tf->hob_lbam;
563  fis[10] = tf->hob_lbah;
564  fis[11] = tf->hob_feature;
565 
566  fis[12] = tf->nsect;
567  fis[13] = tf->hob_nsect;
568  fis[14] = 0;
569  fis[15] = tf->ctl;
570 
571  fis[16] = 0;
572  fis[17] = 0;
573  fis[18] = 0;
574  fis[19] = 0;
575 }
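/*
 * Worked example of the layout produced above: a READ DMA EXT
 * (command 0x25) issued as a command FIS to PMP port 0 serializes as
 *
 *	fis[0]  = 0x27		Register - Host to Device FIS
 *	fis[1]  = 0x80		C (command) bit set, PMP port 0
 *	fis[2]  = 0x25		tf->command
 *	fis[4..6]		LBA bits 7:0, 15:8, 23:16
 *	fis[8..10]		LBA bits 31:24, 39:32, 47:40 (hob fields)
 *	fis[12..13]		sector count bits 7:0, 15:8
 *
 * with the remaining bytes taken from the taskfile or zeroed as shown.
 */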
576 
588 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
589 {
590  tf->command = fis[2]; /* status */
591  tf->feature = fis[3]; /* error */
592 
593  tf->lbal = fis[4];
594  tf->lbam = fis[5];
595  tf->lbah = fis[6];
596  tf->device = fis[7];
597 
598  tf->hob_lbal = fis[8];
599  tf->hob_lbam = fis[9];
600  tf->hob_lbah = fis[10];
601 
602  tf->nsect = fis[12];
603  tf->hob_nsect = fis[13];
604 }
605 
606 static const u8 ata_rw_cmds[] = {
607  /* pio multi */
608  ATA_CMD_READ_MULTI,
609  ATA_CMD_WRITE_MULTI,
610  ATA_CMD_READ_MULTI_EXT,
611  ATA_CMD_WRITE_MULTI_EXT,
612  0,
613  0,
614  0,
615  ATA_CMD_WRITE_MULTI_FUA_EXT,
616  /* pio */
617  ATA_CMD_PIO_READ,
618  ATA_CMD_PIO_WRITE,
619  ATA_CMD_PIO_READ_EXT,
620  ATA_CMD_PIO_WRITE_EXT,
621  0,
622  0,
623  0,
624  0,
625  /* dma */
626  ATA_CMD_READ,
627  ATA_CMD_WRITE,
628  ATA_CMD_READ_EXT,
629  ATA_CMD_WRITE_EXT,
630  0,
631  0,
632  0,
633  ATA_CMD_WRITE_FUA_EXT
634 };
635 
647 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
648 {
649  u8 cmd;
650 
651  int index, fua, lba48, write;
652 
653  fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
654  lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
655  write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
656 
657  if (dev->flags & ATA_DFLAG_PIO) {
658  tf->protocol = ATA_PROT_PIO;
659  index = dev->multi_count ? 0 : 8;
660  } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
661  /* Unable to use DMA due to host limitation */
662  tf->protocol = ATA_PROT_PIO;
663  index = dev->multi_count ? 0 : 8;
664  } else {
665  tf->protocol = ATA_PROT_DMA;
666  index = 16;
667  }
668 
669  cmd = ata_rw_cmds[index + fua + lba48 + write];
670  if (cmd) {
671  tf->command = cmd;
672  return 0;
673  }
674  return -1;
675 }
676 
692 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
693 {
694  u64 block = 0;
695 
696  if (tf->flags & ATA_TFLAG_LBA) {
697  if (tf->flags & ATA_TFLAG_LBA48) {
698  block |= (u64)tf->hob_lbah << 40;
699  block |= (u64)tf->hob_lbam << 32;
700  block |= (u64)tf->hob_lbal << 24;
701  } else
702  block |= (tf->device & 0xf) << 24;
703 
704  block |= tf->lbah << 16;
705  block |= tf->lbam << 8;
706  block |= tf->lbal;
707  } else {
708  u32 cyl, head, sect;
709 
710  cyl = tf->lbam | (tf->lbah << 8);
711  head = tf->device & 0xf;
712  sect = tf->lbal;
713 
714  if (!sect) {
715  ata_dev_warn(dev,
716  "device reported invalid CHS sector 0\n");
717  sect = 1; /* oh well */
718  }
719 
720  block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
721  }
722 
723  return block;
724 }
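/*
 * Worked example for the CHS branch above, assuming a drive geometry
 * of 16 heads and 63 sectors per track: C/H/S 2/5/1 gives
 *
 *	block = (2 * 16 + 5) * 63 + 1 - 1 = 2331
 *
 * i.e. the familiar LBA = (cyl * heads + head) * sectors + sect - 1.
 */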
725 
746 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
747  u64 block, u32 n_block, unsigned int tf_flags,
748  unsigned int tag)
749 {
750  tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
751  tf->flags |= tf_flags;
752 
753  if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
754  /* yay, NCQ */
755  if (!lba_48_ok(block, n_block))
756  return -ERANGE;
757 
758  tf->protocol = ATA_PROT_NCQ;
759  tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
760 
761  if (tf->flags & ATA_TFLAG_WRITE)
762  tf->command = ATA_CMD_FPDMA_WRITE;
763  else
764  tf->command = ATA_CMD_FPDMA_READ;
765 
766  tf->nsect = tag << 3;
767  tf->hob_feature = (n_block >> 8) & 0xff;
768  tf->feature = n_block & 0xff;
769 
770  tf->hob_lbah = (block >> 40) & 0xff;
771  tf->hob_lbam = (block >> 32) & 0xff;
772  tf->hob_lbal = (block >> 24) & 0xff;
773  tf->lbah = (block >> 16) & 0xff;
774  tf->lbam = (block >> 8) & 0xff;
775  tf->lbal = block & 0xff;
776 
777  tf->device = ATA_LBA;
778  if (tf->flags & ATA_TFLAG_FUA)
779  tf->device |= 1 << 7;
780  } else if (dev->flags & ATA_DFLAG_LBA) {
781  tf->flags |= ATA_TFLAG_LBA;
782 
783  if (lba_28_ok(block, n_block)) {
784  /* use LBA28 */
785  tf->device |= (block >> 24) & 0xf;
786  } else if (lba_48_ok(block, n_block)) {
787  if (!(dev->flags & ATA_DFLAG_LBA48))
788  return -ERANGE;
789 
790  /* use LBA48 */
791  tf->flags |= ATA_TFLAG_LBA48;
792 
793  tf->hob_nsect = (n_block >> 8) & 0xff;
794 
795  tf->hob_lbah = (block >> 40) & 0xff;
796  tf->hob_lbam = (block >> 32) & 0xff;
797  tf->hob_lbal = (block >> 24) & 0xff;
798  } else
799  /* request too large even for LBA48 */
800  return -ERANGE;
801 
802  if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
803  return -EINVAL;
804 
805  tf->nsect = n_block & 0xff;
806 
807  tf->lbah = (block >> 16) & 0xff;
808  tf->lbam = (block >> 8) & 0xff;
809  tf->lbal = block & 0xff;
810 
811  tf->device |= ATA_LBA;
812  } else {
813  /* CHS */
814  u32 sect, head, cyl, track;
815 
816  /* The request -may- be too large for CHS addressing. */
817  if (!lba_28_ok(block, n_block))
818  return -ERANGE;
819 
820  if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
821  return -EINVAL;
822 
823  /* Convert LBA to CHS */
824  track = (u32)block / dev->sectors;
825  cyl = track / dev->heads;
826  head = track % dev->heads;
827  sect = (u32)block % dev->sectors + 1;
828 
829  DPRINTK("block %u track %u cyl %u head %u sect %u\n",
830  (u32)block, track, cyl, head, sect);
831 
832  /* Check whether the converted CHS can fit.
833  Cylinder: 0-65535
834  Head: 0-15
835  Sector: 1-255 */
836  if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
837  return -ERANGE;
838 
839  tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
840  tf->lbal = sect;
841  tf->lbam = cyl;
842  tf->lbah = cyl >> 8;
843  tf->device |= head;
844  }
845 
846  return 0;
847 }
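/*
 * Worked NCQ example for the first branch above: block 0x12345678,
 * n_block 8, tag 5 encodes as
 *
 *	tf->nsect   = 5 << 3 = 0x28	tag in bits 7:3
 *	tf->feature = 0x08		count 7:0 (hob_feature = 0x00)
 *	tf->lbal/lbam/lbah = 0x78/0x56/0x34
 *	tf->hob_lbal = 0x12		(hob_lbam = hob_lbah = 0x00)
 *	tf->device  = ATA_LBA
 */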
848 
864 unsigned long ata_pack_xfermask(unsigned long pio_mask,
865  unsigned long mwdma_mask,
866  unsigned long udma_mask)
867 {
868  return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
869  ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
870  ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
871 }
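/*
 * Editorial sketch (not part of the original file): packing the
 * common PIO4/MWDMA2/UDMA5 capability set. The bit positions assume
 * the 3.7-era <linux/ata.h> values ATA_SHIFT_PIO = 0,
 * ATA_SHIFT_MWDMA = 7 and ATA_SHIFT_UDMA = 12.
 *
 *	unsigned long xfer_mask =
 *		ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *
 *	PIO0-4   -> bits 0-4	0x0000001f
 *	MWDMA0-2 -> bits 7-9	0x00000380
 *	UDMA0-5  -> bits 12-17	0x0003f000
 */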
872 
883 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
884  unsigned long *mwdma_mask, unsigned long *udma_mask)
885 {
886  if (pio_mask)
887  *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
888  if (mwdma_mask)
889  *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
890  if (udma_mask)
891  *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
892 }
893 
894 static const struct ata_xfer_ent {
895  int shift, bits;
896  u8 base;
897 } ata_xfer_tbl[] = {
898  { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
899  { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
900  { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
901  { -1, },
902 };
903 
917 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
918 {
919  int highbit = fls(xfer_mask) - 1;
920  const struct ata_xfer_ent *ent;
921 
922  for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
923  if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
924  return ent->base + highbit - ent->shift;
925  return 0xff;
926 }
927 
940 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
941 {
942  const struct ata_xfer_ent *ent;
943 
944  for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
945  if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
946  return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
947  & ~((1 << ent->shift) - 1);
948  return 0;
949 }
950 
963 int ata_xfer_mode2shift(unsigned long xfer_mode)
964 {
965  const struct ata_xfer_ent *ent;
966 
967  for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
968  if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
969  return ent->shift;
970  return -1;
971 }
972 
987 const char *ata_mode_string(unsigned long xfer_mask)
988 {
989  static const char * const xfer_mode_str[] = {
990  "PIO0",
991  "PIO1",
992  "PIO2",
993  "PIO3",
994  "PIO4",
995  "PIO5",
996  "PIO6",
997  "MWDMA0",
998  "MWDMA1",
999  "MWDMA2",
1000  "MWDMA3",
1001  "MWDMA4",
1002  "UDMA/16",
1003  "UDMA/25",
1004  "UDMA/33",
1005  "UDMA/44",
1006  "UDMA/66",
1007  "UDMA/100",
1008  "UDMA/133",
1009  "UDMA7",
1010  };
1011  int highbit;
1012 
1013  highbit = fls(xfer_mask) - 1;
1014  if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1015  return xfer_mode_str[highbit];
1016  return "<n/a>";
1017 }
1018 
1019 const char *sata_spd_string(unsigned int spd)
1020 {
1021  static const char * const spd_str[] = {
1022  "1.5 Gbps",
1023  "3.0 Gbps",
1024  "6.0 Gbps",
1025  };
1026 
1027  if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1028  return "<unknown>";
1029  return spd_str[spd - 1];
1030 }
1031 
1047 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1048 {
1049  /* Apple's open source Darwin code hints that some devices only
1050  * put a proper signature into the LBA mid/high registers,
1051  * so we check only those. It's sufficient for uniqueness.
1052  *
1053  * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1054  * signatures for ATA and ATAPI devices attached on SerialATA,
1055  * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1056  * spec itself never mentions using different signatures
1057  * for ATA/ATAPI devices. Then, the Serial ATA II: Port
1058  * Multiplier specification began to use 0x69/0x96 to identify
1059  * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1060  * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1061  * 0x69/0x96 and described them as reserved for
1062  * SerialATA.
1063  *
1064  * We follow the current spec and consider that 0x69/0x96
1065  * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1066  * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1067  * SEMB signature. This is worked around in
1068  * ata_dev_read_id().
1069  */
1070  if ((tf->lbam == 0) && (tf->lbah == 0)) {
1071  DPRINTK("found ATA device by sig\n");
1072  return ATA_DEV_ATA;
1073  }
1074 
1075  if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1076  DPRINTK("found ATAPI device by sig\n");
1077  return ATA_DEV_ATAPI;
1078  }
1079 
1080  if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1081  DPRINTK("found PMP device by sig\n");
1082  return ATA_DEV_PMP;
1083  }
1084 
1085  if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1086  DPRINTK("found SEMB device by sig (could be ATA device)\n");
1087  return ATA_DEV_SEMB;
1088  }
1089 
1090  DPRINTK("unknown device\n");
1091  return ATA_DEV_UNKNOWN;
1092 }
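/*
 * Signature summary for the checks above (LBA mid/high taskfile
 * values left by the device after reset):
 *
 *	lbam  lbah	result
 *	0x00  0x00	ATA_DEV_ATA
 *	0x14  0xeb	ATA_DEV_ATAPI
 *	0x69  0x96	ATA_DEV_PMP
 *	0x3c  0xc3	ATA_DEV_SEMB (some disks fake this signature,
 *			see ata_dev_read_id())
 *	other		ATA_DEV_UNKNOWN
 */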
1093 
1109 void ata_id_string(const u16 *id, unsigned char *s,
1110  unsigned int ofs, unsigned int len)
1111 {
1112  unsigned int c;
1113 
1114  BUG_ON(len & 1);
1115 
1116  while (len > 0) {
1117  c = id[ofs] >> 8;
1118  *s = c;
1119  s++;
1120 
1121  c = id[ofs] & 0xff;
1122  *s = c;
1123  s++;
1124 
1125  ofs++;
1126  len -= 2;
1127  }
1128 }
1129 
1144 void ata_id_c_string(const u16 *id, unsigned char *s,
1145  unsigned int ofs, unsigned int len)
1146 {
1147  unsigned char *p;
1148 
1149  ata_id_string(id, s, ofs, len - 1);
1150 
1151  p = s + strnlen(s, len - 1);
1152  while (p > s && p[-1] == ' ')
1153  p--;
1154  *p = '\0';
1155 }
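/*
 * Editorial sketch (not part of the original file): IDENTIFY strings
 * are stored big-endian within each 16-bit word, so id[ofs] == 0x4142
 * emits 'A' then 'B'. Typical use, extracting the product string with
 * a hypothetical helper:
 */
static void __maybe_unused example_read_model(struct ata_device *dev)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	/* trims trailing spaces and NUL-terminates */
	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
	ata_dev_info(dev, "model: %s\n", model);
}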
1156 
1157 static u64 ata_id_n_sectors(const u16 *id)
1158 {
1159  if (ata_id_has_lba(id)) {
1160  if (ata_id_has_lba48(id))
1161  return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1162  else
1163  return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1164  } else {
1165  if (ata_id_current_chs_valid(id))
1166  return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1167  id[ATA_ID_CUR_SECTORS];
1168  else
1169  return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1170  id[ATA_ID_SECTORS];
1171  }
1172 }
1173 
1174 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1175 {
1176  u64 sectors = 0;
1177 
1178  sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1179  sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1180  sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1181  sectors |= (tf->lbah & 0xff) << 16;
1182  sectors |= (tf->lbam & 0xff) << 8;
1183  sectors |= (tf->lbal & 0xff);
1184 
1185  return sectors;
1186 }
1187 
1188 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1189 {
1190  u64 sectors = 0;
1191 
1192  sectors |= (tf->device & 0x0f) << 24;
1193  sectors |= (tf->lbah & 0xff) << 16;
1194  sectors |= (tf->lbam & 0xff) << 8;
1195  sectors |= (tf->lbal & 0xff);
1196 
1197  return sectors;
1198 }
1199 
1212 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1213 {
1214  unsigned int err_mask;
1215  struct ata_taskfile tf;
1216  int lba48 = ata_id_has_lba48(dev->id);
1217 
1218  ata_tf_init(dev, &tf);
1219 
1220  /* always clear all address registers */
1221  tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1222 
1223  if (lba48) {
1224  tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1225  tf.flags |= ATA_TFLAG_LBA48;
1226  } else
1227  tf.command = ATA_CMD_READ_NATIVE_MAX;
1228 
1229  tf.protocol |= ATA_PROT_NODATA;
1230  tf.device |= ATA_LBA;
1231 
1232  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1233  if (err_mask) {
1234  ata_dev_warn(dev,
1235  "failed to read native max address (err_mask=0x%x)\n",
1236  err_mask);
1237  if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1238  return -EACCES;
1239  return -EIO;
1240  }
1241 
1242  if (lba48)
1243  *max_sectors = ata_tf_to_lba48(&tf) + 1;
1244  else
1245  *max_sectors = ata_tf_to_lba(&tf) + 1;
1246  if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1247  (*max_sectors)--;
1248  return 0;
1249 }
1250 
1263 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1264 {
1265  unsigned int err_mask;
1266  struct ata_taskfile tf;
1267  int lba48 = ata_id_has_lba48(dev->id);
1268 
1269  new_sectors--;
1270 
1271  ata_tf_init(dev, &tf);
1272 
1273  tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1274 
1275  if (lba48) {
1276  tf.command = ATA_CMD_SET_MAX_EXT;
1277  tf.flags |= ATA_TFLAG_LBA48;
1278 
1279  tf.hob_lbal = (new_sectors >> 24) & 0xff;
1280  tf.hob_lbam = (new_sectors >> 32) & 0xff;
1281  tf.hob_lbah = (new_sectors >> 40) & 0xff;
1282  } else {
1283  tf.command = ATA_CMD_SET_MAX;
1284 
1285  tf.device |= (new_sectors >> 24) & 0xf;
1286  }
1287 
1288  tf.protocol |= ATA_PROT_NODATA;
1289  tf.device |= ATA_LBA;
1290 
1291  tf.lbal = (new_sectors >> 0) & 0xff;
1292  tf.lbam = (new_sectors >> 8) & 0xff;
1293  tf.lbah = (new_sectors >> 16) & 0xff;
1294 
1295  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1296  if (err_mask) {
1297  ata_dev_warn(dev,
1298  "failed to set max address (err_mask=0x%x)\n",
1299  err_mask);
1300  if (err_mask == AC_ERR_DEV &&
1301  (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1302  return -EACCES;
1303  return -EIO;
1304  }
1305 
1306  return 0;
1307 }
1308 
1320 static int ata_hpa_resize(struct ata_device *dev)
1321 {
1322  struct ata_eh_context *ehc = &dev->link->eh_context;
1323  int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1324  bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1325  u64 sectors = ata_id_n_sectors(dev->id);
1326  u64 native_sectors;
1327  int rc;
1328 
1329  /* do we need to do it? */
1330  if (dev->class != ATA_DEV_ATA ||
1331  !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1332  (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1333  return 0;
1334 
1335  /* read native max address */
1336  rc = ata_read_native_max_address(dev, &native_sectors);
1337  if (rc) {
1338  /* If device aborted the command or HPA isn't going to
1339  * be unlocked, skip HPA resizing.
1340  */
1341  if (rc == -EACCES || !unlock_hpa) {
1342  ata_dev_warn(dev,
1343  "HPA support seems broken, skipping HPA handling\n");
1344  dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1345 
1346  /* we can continue if device aborted the command */
1347  if (rc == -EACCES)
1348  rc = 0;
1349  }
1350 
1351  return rc;
1352  }
1353  dev->n_native_sectors = native_sectors;
1354 
1355  /* nothing to do? */
1356  if (native_sectors <= sectors || !unlock_hpa) {
1357  if (!print_info || native_sectors == sectors)
1358  return 0;
1359 
1360  if (native_sectors > sectors)
1361  ata_dev_info(dev,
1362  "HPA detected: current %llu, native %llu\n",
1363  (unsigned long long)sectors,
1364  (unsigned long long)native_sectors);
1365  else if (native_sectors < sectors)
1366  ata_dev_warn(dev,
1367  "native sectors (%llu) is smaller than sectors (%llu)\n",
1368  (unsigned long long)native_sectors,
1369  (unsigned long long)sectors);
1370  return 0;
1371  }
1372 
1373  /* let's unlock HPA */
1374  rc = ata_set_max_sectors(dev, native_sectors);
1375  if (rc == -EACCES) {
1376  /* if device aborted the command, skip HPA resizing */
1377  ata_dev_warn(dev,
1378  "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1379  (unsigned long long)sectors,
1380  (unsigned long long)native_sectors);
1381  dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1382  return 0;
1383  } else if (rc)
1384  return rc;
1385 
1386  /* re-read IDENTIFY data */
1387  rc = ata_dev_reread_id(dev, 0);
1388  if (rc) {
1389  ata_dev_err(dev,
1390  "failed to re-read IDENTIFY data after HPA resizing\n");
1391  return rc;
1392  }
1393 
1394  if (print_info) {
1395  u64 new_sectors = ata_id_n_sectors(dev->id);
1396  ata_dev_info(dev,
1397  "HPA unlocked: %llu -> %llu, native %llu\n",
1398  (unsigned long long)sectors,
1399  (unsigned long long)new_sectors,
1400  (unsigned long long)native_sectors);
1401  }
1402 
1403  return 0;
1404 }
1405 
1417 static inline void ata_dump_id(const u16 *id)
1418 {
1419  DPRINTK("49==0x%04x "
1420  "53==0x%04x "
1421  "63==0x%04x "
1422  "64==0x%04x "
1423  "75==0x%04x \n",
1424  id[49],
1425  id[53],
1426  id[63],
1427  id[64],
1428  id[75]);
1429  DPRINTK("80==0x%04x "
1430  "81==0x%04x "
1431  "82==0x%04x "
1432  "83==0x%04x "
1433  "84==0x%04x \n",
1434  id[80],
1435  id[81],
1436  id[82],
1437  id[83],
1438  id[84]);
1439  DPRINTK("88==0x%04x "
1440  "93==0x%04x\n",
1441  id[88],
1442  id[93]);
1443 }
1444 
1460 unsigned long ata_id_xfermask(const u16 *id)
1461 {
1462  unsigned long pio_mask, mwdma_mask, udma_mask;
1463 
1464  /* Usual case. Word 53 indicates word 64 is valid */
1465  if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1466  pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1467  pio_mask <<= 3;
1468  pio_mask |= 0x7;
1469  } else {
1470  /* If word 64 isn't valid then Word 51 high byte holds
1471  * the PIO timing number for the maximum. Turn it into
1472  * a mask.
1473  */
1474  u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1475  if (mode < 5) /* Valid PIO range */
1476  pio_mask = (2 << mode) - 1;
1477  else
1478  pio_mask = 1;
1479 
1480  /* But wait... there's more. Design your standards by
1481  * committee and you too can get a free iordy field to
1482  * process. However, it's the speeds, not the modes, that
1483  * are supported... Note that drivers using the timing API
1484  * will get this right anyway
1485  */
1486  }
1487 
1488  mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1489 
1490  if (ata_id_is_cfa(id)) {
1491  /*
1492  * Process compact flash extended modes
1493  */
1494  int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1495  int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1496 
1497  if (pio)
1498  pio_mask |= (1 << 5);
1499  if (pio > 1)
1500  pio_mask |= (1 << 6);
1501  if (dma)
1502  mwdma_mask |= (1 << 3);
1503  if (dma > 1)
1504  mwdma_mask |= (1 << 4);
1505  }
1506 
1507  udma_mask = 0;
1508  if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1509  udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1510 
1511  return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1512 }
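/*
 * Worked example for the common path above: with word 53 bit 1 set,
 * id[ATA_ID_PIO_MODES] == 0x0003 yields pio_mask
 * (0x03 << 3) | 0x7 == 0x1f (PIO0-4), id[ATA_ID_MWDMA_MODES] == 0x0007
 * yields mwdma_mask 0x7 (MWDMA0-2), and id[ATA_ID_UDMA_MODES] == 0x003f
 * yields udma_mask 0x3f (UDMA0-5); the three masks are then combined
 * with ata_pack_xfermask() as shown earlier.
 */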
1513 
1514 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1515 {
1516  struct completion *waiting = qc->private_data;
1517 
1518  complete(waiting);
1519 }
1520 
1543 unsigned ata_exec_internal_sg(struct ata_device *dev,
1544  struct ata_taskfile *tf, const u8 *cdb,
1545  int dma_dir, struct scatterlist *sgl,
1546  unsigned int n_elem, unsigned long timeout)
1547 {
1548  struct ata_link *link = dev->link;
1549  struct ata_port *ap = link->ap;
1550  u8 command = tf->command;
1551  int auto_timeout = 0;
1552  struct ata_queued_cmd *qc;
1553  unsigned int tag, preempted_tag;
1554  u32 preempted_sactive, preempted_qc_active;
1555  int preempted_nr_active_links;
1556  DECLARE_COMPLETION_ONSTACK(wait);
1557  unsigned long flags;
1558  unsigned int err_mask;
1559  int rc;
1560 
1561  spin_lock_irqsave(ap->lock, flags);
1562 
1563  /* no internal command while frozen */
1564  if (ap->pflags & ATA_PFLAG_FROZEN) {
1565  spin_unlock_irqrestore(ap->lock, flags);
1566  return AC_ERR_SYSTEM;
1567  }
1568 
1569  /* initialize internal qc */
1570 
1571  /* XXX: Tag 0 is used for drivers with legacy EH as some
1572  * drivers choke if any other tag is given. This breaks
1573  * ata_tag_internal() test for those drivers. Don't use new
1574  * EH stuff without converting to it.
1575  */
1576  if (ap->ops->error_handler)
1577  tag = ATA_TAG_INTERNAL;
1578  else
1579  tag = 0;
1580 
1581  if (test_and_set_bit(tag, &ap->qc_allocated))
1582  BUG();
1583  qc = __ata_qc_from_tag(ap, tag);
1584 
1585  qc->tag = tag;
1586  qc->scsicmd = NULL;
1587  qc->ap = ap;
1588  qc->dev = dev;
1589  ata_qc_reinit(qc);
1590 
1591  preempted_tag = link->active_tag;
1592  preempted_sactive = link->sactive;
1593  preempted_qc_active = ap->qc_active;
1594  preempted_nr_active_links = ap->nr_active_links;
1595  link->active_tag = ATA_TAG_POISON;
1596  link->sactive = 0;
1597  ap->qc_active = 0;
1598  ap->nr_active_links = 0;
1599 
1600  /* prepare & issue qc */
1601  qc->tf = *tf;
1602  if (cdb)
1603  memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1604  qc->flags |= ATA_QCFLAG_RESULT_TF;
1605  qc->dma_dir = dma_dir;
1606  if (dma_dir != DMA_NONE) {
1607  unsigned int i, buflen = 0;
1608  struct scatterlist *sg;
1609 
1610  for_each_sg(sgl, sg, n_elem, i)
1611  buflen += sg->length;
1612 
1613  ata_sg_init(qc, sgl, n_elem);
1614  qc->nbytes = buflen;
1615  }
1616 
1617  qc->private_data = &wait;
1618  qc->complete_fn = ata_qc_complete_internal;
1619 
1620  ata_qc_issue(qc);
1621 
1622  spin_unlock_irqrestore(ap->lock, flags);
1623 
1624  if (!timeout) {
1625  if (ata_probe_timeout)
1626  timeout = ata_probe_timeout * 1000;
1627  else {
1628  timeout = ata_internal_cmd_timeout(dev, command);
1629  auto_timeout = 1;
1630  }
1631  }
1632 
1633  if (ap->ops->error_handler)
1634  ata_eh_release(ap);
1635 
1636  rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1637 
1638  if (ap->ops->error_handler)
1639  ata_eh_acquire(ap);
1640 
1641  ata_sff_flush_pio_task(ap);
1642 
1643  if (!rc) {
1644  spin_lock_irqsave(ap->lock, flags);
1645 
1646  /* We're racing with irq here. If we lose, the
1647  * following test prevents us from completing the qc
1648  * twice. If we win, the port is frozen and will be
1649  * cleaned up by ->post_internal_cmd().
1650  */
1651  if (qc->flags & ATA_QCFLAG_ACTIVE) {
1652  qc->err_mask |= AC_ERR_TIMEOUT;
1653 
1654  if (ap->ops->error_handler)
1655  ata_port_freeze(ap);
1656  else
1657  ata_qc_complete(qc);
1658 
1659  if (ata_msg_warn(ap))
1660  ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1661  command);
1662  }
1663 
1664  spin_unlock_irqrestore(ap->lock, flags);
1665  }
1666 
1667  /* do post_internal_cmd */
1668  if (ap->ops->post_internal_cmd)
1669  ap->ops->post_internal_cmd(qc);
1670 
1671  /* perform minimal error analysis */
1672  if (qc->flags & ATA_QCFLAG_FAILED) {
1673  if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1674  qc->err_mask |= AC_ERR_DEV;
1675 
1676  if (!qc->err_mask)
1677  qc->err_mask |= AC_ERR_OTHER;
1678 
1679  if (qc->err_mask & ~AC_ERR_OTHER)
1680  qc->err_mask &= ~AC_ERR_OTHER;
1681  }
1682 
1683  /* finish up */
1684  spin_lock_irqsave(ap->lock, flags);
1685 
1686  *tf = qc->result_tf;
1687  err_mask = qc->err_mask;
1688 
1689  ata_qc_free(qc);
1690  link->active_tag = preempted_tag;
1691  link->sactive = preempted_sactive;
1692  ap->qc_active = preempted_qc_active;
1693  ap->nr_active_links = preempted_nr_active_links;
1694 
1695  spin_unlock_irqrestore(ap->lock, flags);
1696 
1697  if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1698  ata_internal_cmd_timed_out(dev, command);
1699 
1700  return err_mask;
1701 }
1702 
1722 unsigned ata_exec_internal(struct ata_device *dev,
1723  struct ata_taskfile *tf, const u8 *cdb,
1724  int dma_dir, void *buf, unsigned int buflen,
1725  unsigned long timeout)
1726 {
1727  struct scatterlist *psg = NULL, sg;
1728  unsigned int n_elem = 0;
1729 
1730  if (dma_dir != DMA_NONE) {
1731  WARN_ON(!buf);
1732  sg_init_one(&sg, buf, buflen);
1733  psg = &sg;
1734  n_elem++;
1735  }
1736 
1737  return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1738  timeout);
1739 }
1740 
1755 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1756 {
1757  struct ata_taskfile tf;
1758 
1759  ata_tf_init(dev, &tf);
1760 
1761  tf.command = cmd;
1762  tf.flags |= ATA_TFLAG_DEVICE;
1763  tf.protocol = ATA_PROT_NODATA;
1764 
1765  return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1766 }
1767 
1775 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1776 {
1777  /* Don't set IORDY if we're preparing for reset. IORDY may
1778  * lead to controller lock up on certain controllers if the
1779  * port is not occupied. See bko#11703 for details.
1780  */
1781  if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1782  return 0;
1783  /* Controller doesn't support IORDY. Probably a pointless
1784  * check as the caller should know this.
1785  */
1786  if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1787  return 0;
1788  /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1789  if (ata_id_is_cfa(adev->id)
1790  && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1791  return 0;
1792  /* PIO3 and higher it is mandatory */
1793  if (adev->pio_mode > XFER_PIO_2)
1794  return 1;
1795  /* We turn it on when possible */
1796  if (ata_id_has_iordy(adev->id))
1797  return 1;
1798  return 0;
1799 }
1800 
1808 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1809 {
1810  /* If we have no drive specific rule, then PIO 2 is non IORDY */
1811  if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1812  u16 pio = adev->id[ATA_ID_EIDE_PIO];
1813  /* Is the speed faster than the drive allows non-IORDY? */
1814  if (pio) {
1815  /* This is cycle times, not frequency - watch the logic! */
1816  if (pio > 240) /* PIO2 is 240ns per cycle */
1817  return 3 << ATA_SHIFT_PIO;
1818  return 7 << ATA_SHIFT_PIO;
1819  }
1820  }
1821  return 3 << ATA_SHIFT_PIO;
1822 }
1823 
1834 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1835  struct ata_taskfile *tf, u16 *id)
1836 {
1837  return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1838  id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1839 }
1840 
1862 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1863  unsigned int flags, u16 *id)
1864 {
1865  struct ata_port *ap = dev->link->ap;
1866  unsigned int class = *p_class;
1867  struct ata_taskfile tf;
1868  unsigned int err_mask = 0;
1869  const char *reason;
1870  bool is_semb = class == ATA_DEV_SEMB;
1871  int may_fallback = 1, tried_spinup = 0;
1872  int rc;
1873 
1874  if (ata_msg_ctl(ap))
1875  ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1876 
1877 retry:
1878  ata_tf_init(dev, &tf);
1879 
1880  switch (class) {
1881  case ATA_DEV_SEMB:
1882  class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1883  case ATA_DEV_ATA:
1884  tf.command = ATA_CMD_ID_ATA;
1885  break;
1886  case ATA_DEV_ATAPI:
1887  tf.command = ATA_CMD_ID_ATAPI;
1888  break;
1889  default:
1890  rc = -ENODEV;
1891  reason = "unsupported class";
1892  goto err_out;
1893  }
1894 
1895  tf.protocol = ATA_PROT_PIO;
1896 
1897  /* Some devices choke if TF registers contain garbage. Make
1898  * sure those are properly initialized.
1899  */
1900  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1901 
1902  /* Device presence detection is unreliable on some
1903  * controllers. Always poll IDENTIFY if available.
1904  */
1905  tf.flags |= ATA_TFLAG_POLLING;
1906 
1907  if (ap->ops->read_id)
1908  err_mask = ap->ops->read_id(dev, &tf, id);
1909  else
1910  err_mask = ata_do_dev_read_id(dev, &tf, id);
1911 
1912  if (err_mask) {
1913  if (err_mask & AC_ERR_NODEV_HINT) {
1914  ata_dev_dbg(dev, "NODEV after polling detection\n");
1915  return -ENOENT;
1916  }
1917 
1918  if (is_semb) {
1919  ata_dev_info(dev,
1920  "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1921  /* SEMB is not supported yet */
1922  *p_class = ATA_DEV_SEMB_UNSUP;
1923  return 0;
1924  }
1925 
1926  if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1927  /* Device or controller might have reported
1928  * the wrong device class. Give a shot at the
1929  * other IDENTIFY if the current one is
1930  * aborted by the device.
1931  */
1932  if (may_fallback) {
1933  may_fallback = 0;
1934 
1935  if (class == ATA_DEV_ATA)
1936  class = ATA_DEV_ATAPI;
1937  else
1938  class = ATA_DEV_ATA;
1939  goto retry;
1940  }
1941 
1942  /* Control reaches here iff the device aborted
1943  * both flavors of IDENTIFYs which happens
1944  * sometimes with phantom devices.
1945  */
1946  ata_dev_dbg(dev,
1947  "both IDENTIFYs aborted, assuming NODEV\n");
1948  return -ENOENT;
1949  }
1950 
1951  rc = -EIO;
1952  reason = "I/O error";
1953  goto err_out;
1954  }
1955 
1956  if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1957  ata_dev_dbg(dev, "dumping IDENTIFY data, "
1958  "class=%d may_fallback=%d tried_spinup=%d\n",
1959  class, may_fallback, tried_spinup);
1960  print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1961  16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1962  }
1963 
1964  /* Falling back doesn't make sense if ID data was read
1965  * successfully at least once.
1966  */
1967  may_fallback = 0;
1968 
1968 
1969  swap_buf_le16(id, ATA_ID_WORDS);
1970 
1971  /* sanity check */
1972  rc = -EINVAL;
1973  reason = "device reports invalid type";
1974 
1975  if (class == ATA_DEV_ATA) {
1976  if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1977  goto err_out;
1978  if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1979  ata_id_is_ata(id)) {
1980  ata_dev_dbg(dev,
1981  "host indicates ignore ATA devices, ignored\n");
1982  return -ENOENT;
1983  }
1984  } else {
1985  if (ata_id_is_ata(id))
1986  goto err_out;
1987  }
1988 
1989  if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1990  tried_spinup = 1;
1991  /*
1992  * Drive powered-up in standby mode, and requires a specific
1993  * SET_FEATURES spin-up subcommand before it will accept
1994  * anything other than the original IDENTIFY command.
1995  */
1996  err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1997  if (err_mask && id[2] != 0x738c) {
1998  rc = -EIO;
1999  reason = "SPINUP failed";
2000  goto err_out;
2001  }
2002  /*
2003  * If the drive initially returned incomplete IDENTIFY info,
2004  * we now must reissue the IDENTIFY command.
2005  */
2006  if (id[2] == 0x37c8)
2007  goto retry;
2008  }
2009 
2010  if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2011  /*
2012  * The exact sequence expected by certain pre-ATA4 drives is:
2013  * SRST RESET
2014  * IDENTIFY (optional in early ATA)
2015  * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2016  * anything else..
2017  * Some drives were very specific about that exact sequence.
2018  *
2019  * Note that ATA4 says lba is mandatory so the second check
2020  * should never trigger.
2021  */
2022  if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2023  err_mask = ata_dev_init_params(dev, id[3], id[6]);
2024  if (err_mask) {
2025  rc = -EIO;
2026  reason = "INIT_DEV_PARAMS failed";
2027  goto err_out;
2028  }
2029 
2030  /* current CHS translation info (id[53-58]) might be
2031  * changed. reread the identify device info.
2032  */
2033  flags &= ~ATA_READID_POSTRESET;
2034  goto retry;
2035  }
2036  }
2037 
2038  *p_class = class;
2039 
2040  return 0;
2041 
2042  err_out:
2043  if (ata_msg_warn(ap))
2044  ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2045  reason, err_mask);
2046  return rc;
2047 }
2048 
2049 static int ata_do_link_spd_horkage(struct ata_device *dev)
2050 {
2051  struct ata_link *plink = ata_dev_phys_link(dev);
2052  u32 target, target_limit;
2053 
2054  if (!sata_scr_valid(plink))
2055  return 0;
2056 
2057  if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2058  target = 1;
2059  else
2060  return 0;
2061 
2062  target_limit = (1 << target) - 1;
2063 
2064  /* if already on stricter limit, no need to push further */
2065  if (plink->sata_spd_limit <= target_limit)
2066  return 0;
2067 
2068  plink->sata_spd_limit = target_limit;
2069 
2070  /* Request another EH round by returning -EAGAIN if link is
2071  * going faster than the target speed. Forward progress is
2072  * guaranteed by setting sata_spd_limit to target_limit above.
2073  */
2074  if (plink->sata_spd > target) {
2075  ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2076  sata_spd_string(target));
2077  return -EAGAIN;
2078  }
2079  return 0;
2080 }
2081 
2082 static inline u8 ata_dev_knobble(struct ata_device *dev)
2083 {
2084  struct ata_port *ap = dev->link->ap;
2085 
2086  if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2087  return 0;
2088 
2089  return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2090 }
2091 
2092 static int ata_dev_config_ncq(struct ata_device *dev,
2093  char *desc, size_t desc_sz)
2094 {
2095  struct ata_port *ap = dev->link->ap;
2096  int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2097  unsigned int err_mask;
2098  char *aa_desc = "";
2099 
2100  if (!ata_id_has_ncq(dev->id)) {
2101  desc[0] = '\0';
2102  return 0;
2103  }
2104  if (dev->horkage & ATA_HORKAGE_NONCQ) {
2105  snprintf(desc, desc_sz, "NCQ (not used)");
2106  return 0;
2107  }
2108  if (ap->flags & ATA_FLAG_NCQ) {
2109  hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2110  dev->flags |= ATA_DFLAG_NCQ;
2111  }
2112 
2113  if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2114  (ap->flags & ATA_FLAG_FPDMA_AA) &&
2115  ata_id_has_fpdma_aa(dev->id)) {
2116  err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2117  SATA_FPDMA_AA);
2118  if (err_mask) {
2119  ata_dev_err(dev,
2120  "failed to enable AA (error_mask=0x%x)\n",
2121  err_mask);
2122  if (err_mask != AC_ERR_DEV) {
2123  dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2124  return -EIO;
2125  }
2126  } else
2127  aa_desc = ", AA";
2128  }
2129 
2130  if (hdepth >= ddepth)
2131  snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2132  else
2133  snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2134  ddepth, aa_desc);
2135  return 0;
2136 }
2137 
2151 int ata_dev_configure(struct ata_device *dev)
2152 {
2153  struct ata_port *ap = dev->link->ap;
2154  struct ata_eh_context *ehc = &dev->link->eh_context;
2155  int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2156  const u16 *id = dev->id;
2157  unsigned long xfer_mask;
2158  unsigned int err_mask;
2159  char revbuf[7]; /* XYZ-99\0 */
2160  char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2161  char modelbuf[ATA_ID_PROD_LEN+1];
2162  int rc;
2163 
2164  if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2165  ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2166  return 0;
2167  }
2168 
2169  if (ata_msg_probe(ap))
2170  ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2171 
2172  /* set horkage */
2173  dev->horkage |= ata_dev_blacklisted(dev);
2174  ata_force_horkage(dev);
2175 
2176  if (dev->horkage & ATA_HORKAGE_DISABLE) {
2177  ata_dev_info(dev, "unsupported device, disabling\n");
2178  ata_dev_disable(dev);
2179  return 0;
2180  }
2181 
2182  if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2183  dev->class == ATA_DEV_ATAPI) {
2184  ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2185  atapi_enabled ? "not supported with this driver"
2186  : "disabled");
2187  ata_dev_disable(dev);
2188  return 0;
2189  }
2190 
2191  rc = ata_do_link_spd_horkage(dev);
2192  if (rc)
2193  return rc;
2194 
2195  /* let ACPI work its magic */
2196  rc = ata_acpi_on_devcfg(dev);
2197  if (rc)
2198  return rc;
2199 
2200  /* massage HPA, do it early as it might change IDENTIFY data */
2201  rc = ata_hpa_resize(dev);
2202  if (rc)
2203  return rc;
2204 
2205  /* print device capabilities */
2206  if (ata_msg_probe(ap))
2207  ata_dev_dbg(dev,
2208  "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2209  "85:%04x 86:%04x 87:%04x 88:%04x\n",
2210  __func__,
2211  id[49], id[82], id[83], id[84],
2212  id[85], id[86], id[87], id[88]);
2213 
2214  /* initialize to-be-configured parameters */
2215  dev->flags &= ~ATA_DFLAG_CFG_MASK;
2216  dev->max_sectors = 0;
2217  dev->cdb_len = 0;
2218  dev->n_sectors = 0;
2219  dev->cylinders = 0;
2220  dev->heads = 0;
2221  dev->sectors = 0;
2222  dev->multi_count = 0;
2223 
2224  /*
2225  * common ATA, ATAPI feature tests
2226  */
2227 
2228  /* find max transfer mode; for printk only */
2229  xfer_mask = ata_id_xfermask(id);
2230 
2231  if (ata_msg_probe(ap))
2232  ata_dump_id(id);
2233 
2234  /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2235  ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2236  sizeof(fwrevbuf));
2237 
2238  ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2239  sizeof(modelbuf));
2240 
2241  /* ATA-specific feature tests */
2242  if (dev->class == ATA_DEV_ATA) {
2243  if (ata_id_is_cfa(id)) {
2244  /* CPRM may make this media unusable */
2245  if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2246  ata_dev_warn(dev,
2247  "supports DRM functions and may not be fully accessible\n");
2248  snprintf(revbuf, 7, "CFA");
2249  } else {
2250  snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2251  /* Warn the user if the device has TPM extensions */
2252  if (ata_id_has_tpm(id))
2253  ata_dev_warn(dev,
2254  "supports DRM functions and may not be fully accessible\n");
2255  }
2256 
2257  dev->n_sectors = ata_id_n_sectors(id);
2258 
2259  /* get current R/W Multiple count setting */
2260  if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2261  unsigned int max = dev->id[47] & 0xff;
2262  unsigned int cnt = dev->id[59] & 0xff;
2263  /* only recognize/allow powers of two here */
2264  if (is_power_of_2(max) && is_power_of_2(cnt))
2265  if (cnt <= max)
2266  dev->multi_count = cnt;
2267  }
2268 
2269  if (ata_id_has_lba(id)) {
2270  const char *lba_desc;
2271  char ncq_desc[24];
2272 
2273  lba_desc = "LBA";
2274  dev->flags |= ATA_DFLAG_LBA;
2275  if (ata_id_has_lba48(id)) {
2276  dev->flags |= ATA_DFLAG_LBA48;
2277  lba_desc = "LBA48";
2278 
2279  if (dev->n_sectors >= (1UL << 28) &&
2280  ata_id_has_flush_ext(id))
2281  dev->flags |= ATA_DFLAG_FLUSH_EXT;
2282  }
2283 
2284  /* config NCQ */
2285  rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2286  if (rc)
2287  return rc;
2288 
2289  /* print device info to dmesg */
2290  if (ata_msg_drv(ap) && print_info) {
2291  ata_dev_info(dev, "%s: %s, %s, max %s\n",
2292  revbuf, modelbuf, fwrevbuf,
2293  ata_mode_string(xfer_mask));
2294  ata_dev_info(dev,
2295  "%llu sectors, multi %u: %s %s\n",
2296  (unsigned long long)dev->n_sectors,
2297  dev->multi_count, lba_desc, ncq_desc);
2298  }
2299  } else {
2300  /* CHS */
2301 
2302  /* Default translation */
2303  dev->cylinders = id[1];
2304  dev->heads = id[3];
2305  dev->sectors = id[6];
2306 
2307  if (ata_id_current_chs_valid(id)) {
2308  /* Current CHS translation is valid. */
2309  dev->cylinders = id[54];
2310  dev->heads = id[55];
2311  dev->sectors = id[56];
2312  }
2313 
2314  /* print device info to dmesg */
2315  if (ata_msg_drv(ap) && print_info) {
2316  ata_dev_info(dev, "%s: %s, %s, max %s\n",
2317  revbuf, modelbuf, fwrevbuf,
2318  ata_mode_string(xfer_mask));
2319  ata_dev_info(dev,
2320  "%llu sectors, multi %u, CHS %u/%u/%u\n",
2321  (unsigned long long)dev->n_sectors,
2322  dev->multi_count, dev->cylinders,
2323  dev->heads, dev->sectors);
2324  }
2325  }
2326 
2327  /* check and mark DevSlp capability */
2328  if (ata_id_has_devslp(dev->id))
2329  dev->flags |= ATA_DFLAG_DEVSLP;
2330 
2331  /* Obtain SATA Settings page from Identify Device Data Log,
2332  * which contains DevSlp timing variables etc.
2333  * Exclude old devices with ata_id_has_ncq()
2334  */
2335  if (ata_id_has_ncq(dev->id)) {
2336  err_mask = ata_read_log_page(dev,
2337  ATA_LOG_SATA_ID_DEV_DATA,
2338  ATA_LOG_SATA_SETTINGS,
2339  dev->sata_settings,
2340  1);
2341  if (err_mask)
2342  ata_dev_dbg(dev,
2343  "failed to get Identify Device Data, Emask 0x%x\n",
2344  err_mask);
2345  }
2346 
2347  dev->cdb_len = 16;
2348  }
2349 
2350  /* ATAPI-specific feature tests */
2351  else if (dev->class == ATA_DEV_ATAPI) {
2352  const char *cdb_intr_string = "";
2353  const char *atapi_an_string = "";
2354  const char *dma_dir_string = "";
2355  u32 sntf;
2356 
2357  rc = atapi_cdb_len(id);
2358  if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2359  if (ata_msg_warn(ap))
2360  ata_dev_warn(dev, "unsupported CDB len\n");
2361  rc = -EINVAL;
2362  goto err_out_nosup;
2363  }
2364  dev->cdb_len = (unsigned int) rc;
2365 
2366  /* Enable ATAPI AN if both the host and device have
2367  * the support. If PMP is attached, SNTF is required
2368  * to enable ATAPI AN to discern between PHY status
2369  * changed notifications and ATAPI ANs.
2370  */
2371  if (atapi_an &&
2372  (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2373  (!sata_pmp_attached(ap) ||
2374  sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2375  /* issue SET feature command to turn this on */
2376  err_mask = ata_dev_set_feature(dev,
2377  SETFEATURES_SATA_ENABLE, SATA_AN);
2378  if (err_mask)
2379  ata_dev_err(dev,
2380  "failed to enable ATAPI AN (err_mask=0x%x)\n",
2381  err_mask);
2382  else {
2383  dev->flags |= ATA_DFLAG_AN;
2384  atapi_an_string = ", ATAPI AN";
2385  }
2386  }
2387 
2388  if (ata_id_cdb_intr(dev->id)) {
2389  dev->flags |= ATA_DFLAG_CDB_INTR;
2390  cdb_intr_string = ", CDB intr";
2391  }
2392 
2393  if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2394  dev->flags |= ATA_DFLAG_DMADIR;
2395  dma_dir_string = ", DMADIR";
2396  }
2397 
2398  if (ata_id_has_da(dev->id))
2399  dev->flags |= ATA_DFLAG_DA;
2400 
2401  /* print device info to dmesg */
2402  if (ata_msg_drv(ap) && print_info)
2403  ata_dev_info(dev,
2404  "ATAPI: %s, %s, max %s%s%s%s\n",
2405  modelbuf, fwrevbuf,
2406  ata_mode_string(xfer_mask),
2407  cdb_intr_string, atapi_an_string,
2408  dma_dir_string);
2409  }
2410 
2411  /* determine max_sectors */
2412  dev->max_sectors = ATA_MAX_SECTORS;
2413  if (dev->flags & ATA_DFLAG_LBA48)
2414  dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2415 
2416  /* Limit PATA drive on SATA cable bridge transfers to udma5,
2417  200 sectors */
2418  if (ata_dev_knobble(dev)) {
2419  if (ata_msg_drv(ap) && print_info)
2420  ata_dev_info(dev, "applying bridge limits\n");
2421  dev->udma_mask &= ATA_UDMA5;
2422  dev->max_sectors = ATA_MAX_SECTORS;
2423  }
2424 
2425  if ((dev->class == ATA_DEV_ATAPI) &&
2426  (atapi_command_packet_set(id) == TYPE_TAPE)) {
2427  dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2428  dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2429  }
2430 
2431  if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2432  dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2433  dev->max_sectors);
2434 
2435  if (ap->ops->dev_config)
2436  ap->ops->dev_config(dev);
2437 
2438  if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2439  /* Let the user know. We don't want to disallow opens for
2440  rescue purposes, or in case the vendor is just a blithering
2441  idiot. Do this after the dev_config call as some controllers
2442  with buggy firmware may want to avoid reporting false device
2443  bugs */
2444 
2445  if (print_info) {
2446  ata_dev_warn(dev,
2447 "Drive reports diagnostics failure. This may indicate a drive\n");
2448  ata_dev_warn(dev,
2449 "fault or invalid emulation. Contact drive vendor for information.\n");
2450  }
2451  }
2452 
2453  if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2454  ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2455  ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2456  }
2457 
2458  return 0;
2459 
2460 err_out_nosup:
2461  if (ata_msg_probe(ap))
2462  ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2463  return rc;
2464 }
2465 
2474 int ata_cable_40wire(struct ata_port *ap)
2475 {
2476  return ATA_CBL_PATA40;
2477 }
2478 
2487 int ata_cable_80wire(struct ata_port *ap)
2488 {
2489  return ATA_CBL_PATA80;
2490 }
2491 
2499 int ata_cable_unknown(struct ata_port *ap)
2500 {
2501  return ATA_CBL_PATA_UNK;
2502 }
2503 
2511 int ata_cable_ignore(struct ata_port *ap)
2512 {
2513  return ATA_CBL_PATA_IGN;
2514 }
2515 
2523 int ata_cable_sata(struct ata_port *ap)
2524 {
2525  return ATA_CBL_SATA;
2526 }
2527 
2543 int ata_bus_probe(struct ata_port *ap)
2544 {
2545  unsigned int classes[ATA_MAX_DEVICES];
2546  int tries[ATA_MAX_DEVICES];
2547  int rc;
2548  struct ata_device *dev;
2549 
2550  ata_for_each_dev(dev, &ap->link, ALL)
2551  tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2552 
2553  retry:
2554  ata_for_each_dev(dev, &ap->link, ALL) {
2555  /* If we issue an SRST then an ATA drive (not ATAPI)
2556  * may change configuration and be in PIO0 timing. If
2557  * we do a hard reset (or are coming from power on)
2558  * this is true for ATA or ATAPI. Until we've set a
2559  * suitable controller mode we should not touch the
2560  * bus as we may be talking too fast.
2561  */
2562  dev->pio_mode = XFER_PIO_0;
2563 
2564  /* If the controller has a pio mode setup function
2565  * then use it to set the chipset to rights. Don't
2566  * touch the DMA setup as that will be dealt with when
2567  * configuring devices.
2568  */
2569  if (ap->ops->set_piomode)
2570  ap->ops->set_piomode(ap, dev);
2571  }
2572 
2573  /* reset and determine device classes */
2574  ap->ops->phy_reset(ap);
2575 
2576  ata_for_each_dev(dev, &ap->link, ALL) {
2577  if (dev->class != ATA_DEV_UNKNOWN)
2578  classes[dev->devno] = dev->class;
2579  else
2580  classes[dev->devno] = ATA_DEV_NONE;
2581 
2582  dev->class = ATA_DEV_UNKNOWN;
2583  }
2584 
2585  /* read IDENTIFY page and configure devices. We have to do the identify
2586  specific sequence bass-ackwards so that PDIAG- is released by
2587  the slave device */
2588 
2589  ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2590  if (tries[dev->devno])
2591  dev->class = classes[dev->devno];
2592 
2593  if (!ata_dev_enabled(dev))
2594  continue;
2595 
2596  rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2597  dev->id);
2598  if (rc)
2599  goto fail;
2600  }
2601 
2602  /* Now ask for the cable type as PDIAG- should have been released */
2603  if (ap->ops->cable_detect)
2604  ap->cbl = ap->ops->cable_detect(ap);
2605 
2606  /* We may have SATA bridge glue hiding here irrespective of
2607  * the reported cable types and sensed types. When SATA
2608  * drives indicate we have a bridge, we don't know which end
2609  * of the link the bridge is on, which is a problem.
2610  */
2611  ata_for_each_dev(dev, &ap->link, ENABLED)
2612  if (ata_id_is_sata(dev->id))
2613  ap->cbl = ATA_CBL_SATA;
2614 
2615  /* After the identify sequence we can now set up the devices. We do
2616  this in the normal order so that the user doesn't get confused */
2617 
2618  ata_for_each_dev(dev, &ap->link, ENABLED) {
2619  ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2620  rc = ata_dev_configure(dev);
2621  ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2622  if (rc)
2623  goto fail;
2624  }
2625 
2626  /* configure transfer mode */
2627  rc = ata_set_mode(&ap->link, &dev);
2628  if (rc)
2629  goto fail;
2630 
2631  ata_for_each_dev(dev, &ap->link, ENABLED)
2632  return 0;
2633 
2634  return -ENODEV;
2635 
2636  fail:
2637  tries[dev->devno]--;
2638 
2639  switch (rc) {
2640  case -EINVAL:
2641  /* eeek, something went very wrong, give up */
2642  tries[dev->devno] = 0;
2643  break;
2644 
2645  case -ENODEV:
2646  /* give it just one more chance */
2647  tries[dev->devno] = min(tries[dev->devno], 1);
2648  case -EIO:
2649  if (tries[dev->devno] == 1) {
2650  /* This is the last chance, better to slow
2651  * down than lose it.
2652  */
2653  sata_down_spd_limit(&ap->link, 0);
2654  ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2655  }
2656  }
2657 
2658  if (!tries[dev->devno])
2659  ata_dev_disable(dev);
2660 
2661  goto retry;
2662 }
2663 
2673 static void sata_print_link_status(struct ata_link *link)
2674 {
2675  u32 sstatus, scontrol, tmp;
2676 
2677  if (sata_scr_read(link, SCR_STATUS, &sstatus))
2678  return;
2679  sata_scr_read(link, SCR_CONTROL, &scontrol);
2680 
2681  if (ata_phys_link_online(link)) {
2682  tmp = (sstatus >> 4) & 0xf;
2683  ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2684  sata_spd_string(tmp), sstatus, scontrol);
2685  } else {
2686  ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2687  sstatus, scontrol);
2688  }
2689 }
2690 
2699 struct ata_device *ata_dev_pair(struct ata_device *adev)
2700 {
2701  struct ata_link *link = adev->link;
2702  struct ata_device *pair = &link->device[1 - adev->devno];
2703  if (!ata_dev_enabled(pair))
2704  return NULL;
2705  return pair;
2706 }
2707 
2728 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2729 {
2730  u32 sstatus, spd, mask;
2731  int rc, bit;
2732 
2733  if (!sata_scr_valid(link))
2734  return -EOPNOTSUPP;
2735 
2736  /* If SCR can be read, use it to determine the current SPD.
2737  * If not, use cached value in link->sata_spd.
2738  */
2739  rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2740  if (rc == 0 && ata_sstatus_online(sstatus))
2741  spd = (sstatus >> 4) & 0xf;
2742  else
2743  spd = link->sata_spd;
2744 
2745  mask = link->sata_spd_limit;
2746  if (mask <= 1)
2747  return -EINVAL;
2748 
2749  /* unconditionally mask off the highest bit */
2750  bit = fls(mask) - 1;
2751  mask &= ~(1 << bit);
2752 
2753  /* Mask off all speeds higher than or equal to the current
2754  * one. Force 1.5Gbps if current SPD is not available.
2755  */
2756  if (spd > 1)
2757  mask &= (1 << (spd - 1)) - 1;
2758  else
2759  mask &= 1;
2760 
2761  /* were we already at the bottom? */
2762  if (!mask)
2763  return -EINVAL;
2764 
2765  if (spd_limit) {
2766  if (mask & ((1 << spd_limit) - 1))
2767  mask &= (1 << spd_limit) - 1;
2768  else {
2769  bit = ffs(mask) - 1;
2770  mask = 1 << bit;
2771  }
2772  }
2773 
2774  link->sata_spd_limit = mask;
2775 
2776  ata_link_warn(link, "limiting SATA link speed to %s\n",
2777  sata_spd_string(fls(mask)));
2778 
2779  return 0;
2780 }
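/*
 * A standalone user-space sketch (assumption: it mirrors only the bit
 * arithmetic above, not the SCR access). With all three generations
 * allowed (mask 0x7) and the link currently at 3.0 Gbps (spd == 2),
 * the new limit collapses to 1.5 Gbps only:
 */
#include <stdio.h>

static int fls_demo(unsigned int x)	/* stand-in for the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int mask = 0x7;	/* bit n set => generation n+1 allowed */
	unsigned int spd = 2;		/* SStatus SPD field: 3.0 Gbps */

	mask &= ~(1u << (fls_demo(mask) - 1));	/* drop the highest speed */
	if (spd > 1)
		mask &= (1u << (spd - 1)) - 1;	/* drop >= current speed */
	else
		mask &= 1;

	printf("new sata_spd_limit mask: 0x%x\n", mask);	/* prints 0x1 */
	return 0;
}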
2781 
2782 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2783 {
2784  struct ata_link *host_link = &link->ap->link;
2785  u32 limit, target, spd;
2786 
2787  limit = link->sata_spd_limit;
2788 
2789  /* Don't configure downstream link faster than upstream link.
2790  * It doesn't speed up anything and some PMPs choke on such
2791  * configuration.
2792  */
2793  if (!ata_is_host_link(link) && host_link->sata_spd)
2794  limit &= (1 << host_link->sata_spd) - 1;
2795 
2796  if (limit == UINT_MAX)
2797  target = 0;
2798  else
2799  target = fls(limit);
2800 
2801  spd = (*scontrol >> 4) & 0xf;
2802  *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2803 
2804  return spd != target;
2805 }
2806 
2822 static int sata_set_spd_needed(struct ata_link *link)
2823 {
2824  u32 scontrol;
2825 
2826  if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2827  return 1;
2828 
2829  return __sata_set_spd_needed(link, &scontrol);
2830 }
2831 
2845 int sata_set_spd(struct ata_link *link)
2846 {
2847  u32 scontrol;
2848  int rc;
2849 
2850  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2851  return rc;
2852 
2853  if (!__sata_set_spd_needed(link, &scontrol))
2854  return 0;
2855 
2856  if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2857  return rc;
2858 
2859  return 1;
2860 }
2861 
2862 /*
2863  * This mode timing computation functionality is ported over from
2864  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2865  */
2866 /*
2867  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2868  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2869  * for UDMA6, which is currently supported only by Maxtor drives.
2870  *
2871  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2872  */
2873 
2874 static const struct ata_timing ata_timing[] = {
2875 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2876  { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2877  { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2878  { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2879  { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2880  { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2881  { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2882  { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2883 
2884  { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2885  { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2886  { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2887 
2888  { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2889  { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2890  { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2891  { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2892  { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2893 
2894 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2895  { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2896  { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2897  { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2898  { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2899  { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2900  { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2901  { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2902 
2903  { 0xFF }
2904 };
2905 
2906 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2907 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2908 
2909 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2910 {
2911  q->setup = EZ(t->setup * 1000, T);
2912  q->act8b = EZ(t->act8b * 1000, T);
2913  q->rec8b = EZ(t->rec8b * 1000, T);
2914  q->cyc8b = EZ(t->cyc8b * 1000, T);
2915  q->active = EZ(t->active * 1000, T);
2916  q->recover = EZ(t->recover * 1000, T);
2917  q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2918  q->cycle = EZ(t->cycle * 1000, T);
2919  q->udma = EZ(t->udma * 1000, UT);
2920 }
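/*
 * A user-space sketch of the macros above. Callers (e.g. pata_amd) pass
 * T and UT as clock periods in picoseconds, while the timing fields are
 * in nanoseconds - hence the "* 1000". ENOUGH() is a round-up division;
 * EZ() additionally preserves 0 as "not specified". On a 33 MHz bus
 * (T = 30000 ps) a 120 ns PIO cycle therefore needs 4 clocks:
 */
#include <stdio.h>

#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)

int main(void)
{
	int T = 30000;		/* 33 MHz clock period in picoseconds */
	int cycle_ns = 120;	/* XFER_PIO_4 cycle time from the table */

	printf("clocks = %d\n", EZ(cycle_ns * 1000, T));	/* prints 4 */
	return 0;
}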
2921 
2922 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2923  struct ata_timing *m, unsigned int what)
2924 {
2925  if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2926  if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2927  if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2928  if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2929  if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2930  if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2931  if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2932  if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2933  if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2934 }
2935 
2936 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2937 {
2938  const struct ata_timing *t = ata_timing;
2939 
2940  while (xfer_mode > t->mode)
2941  t++;
2942 
2943  if (xfer_mode == t->mode)
2944  return t;
2945 
2946  WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2947  __func__, xfer_mode);
2948 
2949  return NULL;
2950 }
2951 
2952 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2953  struct ata_timing *t, int T, int UT)
2954 {
2955  const u16 *id = adev->id;
2956  const struct ata_timing *s;
2957  struct ata_timing p;
2958 
2959  /*
2960  * Find the mode.
2961  */
2962 
2963  if (!(s = ata_timing_find_mode(speed)))
2964  return -EINVAL;
2965 
2966  memcpy(t, s, sizeof(*s));
2967 
2968  /*
2969  * If the drive is an EIDE drive, it can tell us it needs extended
2970  * PIO/MW_DMA cycle timing.
2971  */
2972 
2973  if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2974  memset(&p, 0, sizeof(p));
2975 
2976  if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
2977  if (speed <= XFER_PIO_2)
2978  p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
2979  else if ((speed <= XFER_PIO_4) ||
2980  (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
2981  p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
2982  } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
2983  p.cycle = id[ATA_ID_EIDE_DMA_MIN];
2984 
2985  ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2986  }
2987 
2988  /*
2989  * Convert the timing to bus clock counts.
2990  */
2991 
2992  ata_timing_quantize(t, t, T, UT);
2993 
2994  /*
2995  * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2996  * S.M.A.R.T. and some other commands. We have to ensure that the
2997  * DMA cycle timing is slower than or equal to the fastest PIO timing.
2998  */
2999 
3000  if (speed > XFER_PIO_6) {
3001  ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3002  ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3003  }
3004 
3005  /*
3006  * Lengthen active & recovery time so that cycle time is correct.
3007  */
3008 
3009  if (t->act8b + t->rec8b < t->cyc8b) {
3010  t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3011  t->rec8b = t->cyc8b - t->act8b;
3012  }
3013 
3014  if (t->active + t->recover < t->cycle) {
3015  t->active += (t->cycle - (t->active + t->recover)) / 2;
3016  t->recover = t->cycle - t->active;
3017  }
3018 
3019  /* In a few cases quantisation may produce enough errors to
3020  leave t->cycle too low for the sum of active and recovery;
3021  if so, we must correct this. */
3022  if (t->active + t->recover > t->cycle)
3023  t->cycle = t->active + t->recover;
3024 
3025  return 0;
3026 }
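/*
 * Sketch of the typical consumer (assumption: loosely modeled on PATA
 * drivers such as pata_amd, not copied from any one of them). A
 * set_piomode hook converts the chosen mode into bus clocks and then
 * programs chip-specific registers from the result:
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* 33 MHz clock, period in ps */
	int UT = T / 2;			/* UDMA clocks on both edges */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT) == 0) {
		/* program t.setup, t.active, t.recover, t.cycle into the
		 * chipset here; the register layout is hardware specific
		 * and omitted */
	}
}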
3027 
3044 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3045 {
3046  u8 base_mode = 0xff, last_mode = 0xff;
3047  const struct ata_xfer_ent *ent;
3048  const struct ata_timing *t;
3049 
3050  for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3051  if (ent->shift == xfer_shift)
3052  base_mode = ent->base;
3053 
3054  for (t = ata_timing_find_mode(base_mode);
3055  t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3056  unsigned short this_cycle;
3057 
3058  switch (xfer_shift) {
3059  case ATA_SHIFT_PIO:
3060  case ATA_SHIFT_MWDMA:
3061  this_cycle = t->cycle;
3062  break;
3063  case ATA_SHIFT_UDMA:
3064  this_cycle = t->udma;
3065  break;
3066  default:
3067  return 0xff;
3068  }
3069 
3070  if (cycle > this_cycle)
3071  break;
3072 
3073  last_mode = t->mode;
3074  }
3075 
3076  return last_mode;
3077 }
3078 
3094 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3095 {
3096  char buf[32];
3097  unsigned long orig_mask, xfer_mask;
3098  unsigned long pio_mask, mwdma_mask, udma_mask;
3099  int quiet, highbit;
3100 
3101  quiet = !!(sel & ATA_DNXFER_QUIET);
3102  sel &= ~ATA_DNXFER_QUIET;
3103 
3104  xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3105  dev->mwdma_mask,
3106  dev->udma_mask);
3107  ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3108 
3109  switch (sel) {
3110  case ATA_DNXFER_PIO:
3111  highbit = fls(pio_mask) - 1;
3112  pio_mask &= ~(1 << highbit);
3113  break;
3114 
3115  case ATA_DNXFER_DMA:
3116  if (udma_mask) {
3117  highbit = fls(udma_mask) - 1;
3118  udma_mask &= ~(1 << highbit);
3119  if (!udma_mask)
3120  return -ENOENT;
3121  } else if (mwdma_mask) {
3122  highbit = fls(mwdma_mask) - 1;
3123  mwdma_mask &= ~(1 << highbit);
3124  if (!mwdma_mask)
3125  return -ENOENT;
3126  }
3127  break;
3128 
3129  case ATA_DNXFER_40C:
3130  udma_mask &= ATA_UDMA_MASK_40C;
3131  break;
3132 
3133  case ATA_DNXFER_FORCE_PIO0:
3134  pio_mask &= 1;
3135  case ATA_DNXFER_FORCE_PIO:
3136  mwdma_mask = 0;
3137  udma_mask = 0;
3138  break;
3139 
3140  default:
3141  BUG();
3142  }
3143 
3144  xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3145 
3146  if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3147  return -ENOENT;
3148 
3149  if (!quiet) {
3150  if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3151  snprintf(buf, sizeof(buf), "%s:%s",
3152  ata_mode_string(xfer_mask),
3153  ata_mode_string(xfer_mask & ATA_MASK_PIO));
3154  else
3155  snprintf(buf, sizeof(buf), "%s",
3156  ata_mode_string(xfer_mask));
3157 
3158  ata_dev_warn(dev, "limiting speed to %s\n", buf);
3159  }
3160 
3161  ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3162  &dev->udma_mask);
3163 
3164  return 0;
3165 }
3166 
3167 static int ata_dev_set_mode(struct ata_device *dev)
3168 {
3169  struct ata_port *ap = dev->link->ap;
3170  struct ata_eh_context *ehc = &dev->link->eh_context;
3171  const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3172  const char *dev_err_whine = "";
3173  int ign_dev_err = 0;
3174  unsigned int err_mask = 0;
3175  int rc;
3176 
3177  dev->flags &= ~ATA_DFLAG_PIO;
3178  if (dev->xfer_shift == ATA_SHIFT_PIO)
3179  dev->flags |= ATA_DFLAG_PIO;
3180 
3181  if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3182  dev_err_whine = " (SET_XFERMODE skipped)";
3183  else {
3184  if (nosetxfer)
3185  ata_dev_warn(dev,
3186  "NOSETXFER but PATA detected - can't "
3187  "skip SETXFER, might malfunction\n");
3188  err_mask = ata_dev_set_xfermode(dev);
3189  }
3190 
3191  if (err_mask & ~AC_ERR_DEV)
3192  goto fail;
3193 
3194  /* revalidate */
3195  ehc->i.flags |= ATA_EHI_POST_SETMODE;
3196  rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3197  ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3198  if (rc)
3199  return rc;
3200 
3201  if (dev->xfer_shift == ATA_SHIFT_PIO) {
3202  /* Old CFA may refuse this command, which is just fine */
3203  if (ata_id_is_cfa(dev->id))
3204  ign_dev_err = 1;
3205  /* Catch several broken garbage emulations plus some pre
3206  ATA devices */
3207  if (ata_id_major_version(dev->id) == 0 &&
3208  dev->pio_mode <= XFER_PIO_2)
3209  ign_dev_err = 1;
3210  /* Some very old devices and some bad newer ones fail
3211  any kind of SET_XFERMODE request but support PIO0-2
3212  timings and no IORDY */
3213  if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3214  ign_dev_err = 1;
3215  }
3216  /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3217  Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3218  if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3219  dev->dma_mode == XFER_MW_DMA_0 &&
3220  (dev->id[63] >> 8) & 1)
3221  ign_dev_err = 1;
3222 
3223  /* if the device is actually configured correctly, ignore dev err */
3224  if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3225  ign_dev_err = 1;
3226 
3227  if (err_mask & AC_ERR_DEV) {
3228  if (!ign_dev_err)
3229  goto fail;
3230  else
3231  dev_err_whine = " (device error ignored)";
3232  }
3233 
3234  DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3235  dev->xfer_shift, (int)dev->xfer_mode);
3236 
3237  ata_dev_info(dev, "configured for %s%s\n",
3238  ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3239  dev_err_whine);
3240 
3241  return 0;
3242 
3243  fail:
3244  ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3245  return -EIO;
3246 }
3247 
3265 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3266 {
3267  struct ata_port *ap = link->ap;
3268  struct ata_device *dev;
3269  int rc = 0, used_dma = 0, found = 0;
3270 
3271  /* step 1: calculate xfer_mask */
3272  ata_for_each_dev(dev, link, ENABLED) {
3273  unsigned long pio_mask, dma_mask;
3274  unsigned int mode_mask;
3275 
3276  mode_mask = ATA_DMA_MASK_ATA;
3277  if (dev->class == ATA_DEV_ATAPI)
3278  mode_mask = ATA_DMA_MASK_ATAPI;
3279  else if (ata_id_is_cfa(dev->id))
3280  mode_mask = ATA_DMA_MASK_CFA;
3281 
3282  ata_dev_xfermask(dev);
3283  ata_force_xfermask(dev);
3284 
3285  pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3286 
3287  if (libata_dma_mask & mode_mask)
3288  dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3289  dev->udma_mask);
3290  else
3291  dma_mask = 0;
3292 
3293  dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3294  dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3295 
3296  found = 1;
3297  if (ata_dma_enabled(dev))
3298  used_dma = 1;
3299  }
3300  if (!found)
3301  goto out;
3302 
3303  /* step 2: always set host PIO timings */
3304  ata_for_each_dev(dev, link, ENABLED) {
3305  if (dev->pio_mode == 0xff) {
3306  ata_dev_warn(dev, "no PIO support\n");
3307  rc = -EINVAL;
3308  goto out;
3309  }
3310 
3311  dev->xfer_mode = dev->pio_mode;
3312  dev->xfer_shift = ATA_SHIFT_PIO;
3313  if (ap->ops->set_piomode)
3314  ap->ops->set_piomode(ap, dev);
3315  }
3316 
3317  /* step 3: set host DMA timings */
3318  ata_for_each_dev(dev, link, ENABLED) {
3319  if (!ata_dma_enabled(dev))
3320  continue;
3321 
3322  dev->xfer_mode = dev->dma_mode;
3323  dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3324  if (ap->ops->set_dmamode)
3325  ap->ops->set_dmamode(ap, dev);
3326  }
3327 
3328  /* step 4: update devices' xfer mode */
3329  ata_for_each_dev(dev, link, ENABLED) {
3330  rc = ata_dev_set_mode(dev);
3331  if (rc)
3332  goto out;
3333  }
3334 
3335  /* Record simplex status. If we selected DMA then the other
3336  * host channels are not permitted to do so.
3337  */
3338  if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3339  ap->host->simplex_claimed = ap;
3340 
3341  out:
3342  if (rc)
3343  *r_failed_dev = dev;
3344  return rc;
3345 }
3346 
3367 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3368  int (*check_ready)(struct ata_link *link))
3369 {
3370  unsigned long start = jiffies;
3371  unsigned long nodev_deadline;
3372  int warned = 0;
3373 
3374  /* choose which 0xff timeout to use, read comment in libata.h */
3375  if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3376  nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3377  else
3378  nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3379 
3380  /* Slave readiness can't be tested separately from master. On
3381  * M/S emulation configuration, this function should be called
3382  * only on the master and it will handle both master and slave.
3383  */
3384  WARN_ON(link == link->ap->slave_link);
3385 
3386  if (time_after(nodev_deadline, deadline))
3387  nodev_deadline = deadline;
3388 
3389  while (1) {
3390  unsigned long now = jiffies;
3391  int ready, tmp;
3392 
3393  ready = tmp = check_ready(link);
3394  if (ready > 0)
3395  return 0;
3396 
3397  /*
3398  * -ENODEV could be transient. Ignore -ENODEV if link
3399  * is online. Also, some SATA devices take a long
3400  * time to clear 0xff after reset. Wait for
3401  * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3402  * offline.
3403  *
3404  * Note that some PATA controllers (pata_ali) explode
3405  * if status register is read more than once when
3406  * there's no device attached.
3407  */
3408  if (ready == -ENODEV) {
3409  if (ata_link_online(link))
3410  ready = 0;
3411  else if ((link->ap->flags & ATA_FLAG_SATA) &&
3412  !ata_link_offline(link) &&
3413  time_before(now, nodev_deadline))
3414  ready = 0;
3415  }
3416 
3417  if (ready)
3418  return ready;
3419  if (time_after(now, deadline))
3420  return -EBUSY;
3421 
3422  if (!warned && time_after(now, start + 5 * HZ) &&
3423  (deadline - now > 3 * HZ)) {
3424  ata_link_warn(link,
3425  "link is slow to respond, please be patient "
3426  "(ready=%d)\n", tmp);
3427  warned = 1;
3428  }
3429 
3430  ata_msleep(link->ap, 50);
3431  }
3432 }
3433 
3448 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3449  int (*check_ready)(struct ata_link *link))
3450 {
3451  msleep(ATA_WAIT_AFTER_RESET);
3452 
3453  return ata_wait_ready(link, deadline, check_ready);
3454 }
3455 
3478 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3479  unsigned long deadline)
3480 {
3481  unsigned long interval = params[0];
3482  unsigned long duration = params[1];
3483  unsigned long last_jiffies, t;
3484  u32 last, cur;
3485  int rc;
3486 
3487  t = ata_deadline(jiffies, params[2]);
3488  if (time_before(t, deadline))
3489  deadline = t;
3490 
3491  if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3492  return rc;
3493  cur &= 0xf;
3494 
3495  last = cur;
3496  last_jiffies = jiffies;
3497 
3498  while (1) {
3499  ata_msleep(link->ap, interval);
3500  if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3501  return rc;
3502  cur &= 0xf;
3503 
3504  /* DET stable? */
3505  if (cur == last) {
3506  if (cur == 1 && time_before(jiffies, deadline))
3507  continue;
3508  if (time_after(jiffies,
3509  ata_deadline(last_jiffies, duration)))
3510  return 0;
3511  continue;
3512  }
3513 
3514  /* unstable, start over */
3515  last = cur;
3516  last_jiffies = jiffies;
3517 
3518  /* Check deadline. If debouncing failed, return
3519  * -EPIPE to tell upper layer to lower link speed.
3520  */
3521  if (time_after(jiffies, deadline))
3522  return -EPIPE;
3523  }
3524 }
3525 
3540 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3541  unsigned long deadline)
3542 {
3543  int tries = ATA_LINK_RESUME_TRIES;
3544  u32 scontrol, serror;
3545  int rc;
3546 
3547  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3548  return rc;
3549 
3550  /*
3551  * Writes to SControl sometimes get ignored under certain
3552  * controllers (ata_piix SIDPR). Make sure DET actually is
3553  * cleared.
3554  */
3555  do {
3556  scontrol = (scontrol & 0x0f0) | 0x300;
3557  if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3558  return rc;
3559  /*
3560  * Some PHYs react badly if SStatus is pounded
3561  * immediately after resuming. Delay 200ms before
3562  * debouncing.
3563  */
3564  ata_msleep(link->ap, 200);
3565 
3566  /* is SControl restored correctly? */
3567  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3568  return rc;
3569  } while ((scontrol & 0xf0f) != 0x300 && --tries);
3570 
3571  if ((scontrol & 0xf0f) != 0x300) {
3572  ata_link_warn(link, "failed to resume link (SControl %X)\n",
3573  scontrol);
3574  return 0;
3575  }
3576 
3577  if (tries < ATA_LINK_RESUME_TRIES)
3578  ata_link_warn(link, "link resume succeeded after %d retries\n",
3579  ATA_LINK_RESUME_TRIES - tries);
3580 
3581  if ((rc = sata_link_debounce(link, params, deadline)))
3582  return rc;
3583 
3584  /* clear SError, some PHYs require this even for SRST to work */
3585  if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3586  rc = sata_scr_write(link, SCR_ERROR, serror);
3587 
3588  return rc != -EINVAL ? rc : 0;
3589 }
3590 
3609 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3610  bool spm_wakeup)
3611 {
3612  struct ata_eh_context *ehc = &link->eh_context;
3613  bool woken_up = false;
3614  u32 scontrol;
3615  int rc;
3616 
3617  rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3618  if (rc)
3619  return rc;
3620 
3621  switch (policy) {
3622  case ATA_LPM_MAX_POWER:
3623  /* disable all LPM transitions */
3624  scontrol |= (0x7 << 8);
3625  /* initiate transition to active state */
3626  if (spm_wakeup) {
3627  scontrol |= (0x4 << 12);
3628  woken_up = true;
3629  }
3630  break;
3631  case ATA_LPM_MED_POWER:
3632  /* allow LPM to PARTIAL */
3633  scontrol &= ~(0x1 << 8);
3634  scontrol |= (0x6 << 8);
3635  break;
3636  case ATA_LPM_MIN_POWER:
3637  if (ata_link_nr_enabled(link) > 0)
3638  /* no restrictions on LPM transitions */
3639  scontrol &= ~(0x7 << 8);
3640  else {
3641  /* empty port, power off */
3642  scontrol &= ~0xf;
3643  scontrol |= (0x1 << 2);
3644  }
3645  break;
3646  default:
3647  WARN_ON(1);
3648  }
3649 
3650  rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3651  if (rc)
3652  return rc;
3653 
3654  /* give the link time to transit out of LPM state */
3655  if (woken_up)
3656  msleep(10);
3657 
3658  /* clear PHYRDY_CHG from SError */
3659  ehc->i.serror &= ~SERR_PHYRDY_CHG;
3660  return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3661 }
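/*
 * For reference, the SControl fields poked above, as defined by the
 * SATA specification: DET in bits 3:0, SPD in 7:4, IPM in 11:8. These
 * helpers are hypothetical, added only to document the layout:
 */
static inline u32 scontrol_det(u32 scontrol) { return scontrol & 0xf; }
static inline u32 scontrol_spd(u32 scontrol) { return (scontrol >> 4) & 0xf; }
static inline u32 scontrol_ipm(u32 scontrol) { return (scontrol >> 8) & 0xf; }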
3662 
3680 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3681 {
3682  struct ata_port *ap = link->ap;
3683  struct ata_eh_context *ehc = &link->eh_context;
3684  const unsigned long *timing = sata_ehc_deb_timing(ehc);
3685  int rc;
3686 
3687  /* if we're about to do hardreset, nothing more to do */
3688  if (ehc->i.action & ATA_EH_HARDRESET)
3689  return 0;
3690 
3691  /* if SATA, resume link */
3692  if (ap->flags & ATA_FLAG_SATA) {
3693  rc = sata_link_resume(link, timing, deadline);
3694  /* whine about phy resume failure but proceed */
3695  if (rc && rc != -EOPNOTSUPP)
3696  ata_link_warn(link,
3697  "failed to resume link for reset (errno=%d)\n",
3698  rc);
3699  }
3700 
3701  /* no point in trying softreset on offline link */
3702  if (ata_phys_link_offline(link))
3703  ehc->i.action &= ~ATA_EH_SOFTRESET;
3704 
3705  return 0;
3706 }
3707 
3732 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3733  unsigned long deadline,
3734  bool *online, int (*check_ready)(struct ata_link *))
3735 {
3736  u32 scontrol;
3737  int rc;
3738 
3739  DPRINTK("ENTER\n");
3740 
3741  if (online)
3742  *online = false;
3743 
3744  if (sata_set_spd_needed(link)) {
3745  /* SATA spec says nothing about how to reconfigure
3746  * spd. To be on the safe side, turn off phy during
3747  * reconfiguration. This works for at least ICH7 AHCI
3748  * and Sil3124.
3749  */
3750  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3751  goto out;
3752 
3753  scontrol = (scontrol & 0x0f0) | 0x304;
3754 
3755  if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3756  goto out;
3757 
3758  sata_set_spd(link);
3759  }
3760 
3761  /* issue phy wake/reset */
3762  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3763  goto out;
3764 
3765  scontrol = (scontrol & 0x0f0) | 0x301;
3766 
3767  if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3768  goto out;
3769 
3770  /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3771  * 10.4.2 says at least 1 ms.
3772  */
3773  ata_msleep(link->ap, 1);
3774 
3775  /* bring link back */
3776  rc = sata_link_resume(link, timing, deadline);
3777  if (rc)
3778  goto out;
3779  /* if link is offline nothing more to do */
3780  if (ata_phys_link_offline(link))
3781  goto out;
3782 
3783  /* Link is online. From this point, -ENODEV too is an error. */
3784  if (online)
3785  *online = true;
3786 
3787  if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3788  /* If PMP is supported, we have to do follow-up SRST.
3789  * Some PMPs don't send D2H Reg FIS after hardreset if
3790  * the first port is empty. Wait only for
3791  * ATA_TMOUT_PMP_SRST_WAIT.
3792  */
3793  if (check_ready) {
3794  unsigned long pmp_deadline;
3795 
3796  pmp_deadline = ata_deadline(jiffies,
3797  ATA_TMOUT_PMP_SRST_WAIT);
3798  if (time_after(pmp_deadline, deadline))
3799  pmp_deadline = deadline;
3800  ata_wait_ready(link, pmp_deadline, check_ready);
3801  }
3802  rc = -EAGAIN;
3803  goto out;
3804  }
3805 
3806  rc = 0;
3807  if (check_ready)
3808  rc = ata_wait_ready(link, deadline, check_ready);
3809  out:
3810  if (rc && rc != -EAGAIN) {
3811  /* online is set iff link is online && reset succeeded */
3812  if (online)
3813  *online = false;
3814  ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3815  }
3816  DPRINTK("EXIT, rc=%d\n", rc);
3817  return rc;
3818 }
3819 
3834 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3835  unsigned long deadline)
3836 {
3837  const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3838  bool online;
3839  int rc;
3840 
3841  /* do hardreset */
3842  rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3843  return online ? -EAGAIN : rc;
3844 }
3845 
3858 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3859 {
3860  u32 serror;
3861 
3862  DPRINTK("ENTER\n");
3863 
3864  /* reset complete, clear SError */
3865  if (!sata_scr_read(link, SCR_ERROR, &serror))
3866  sata_scr_write(link, SCR_ERROR, serror);
3867 
3868  /* print link status */
3869  sata_print_link_status(link);
3870 
3871  DPRINTK("EXIT\n");
3872 }
3873 
3890 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3891  const u16 *new_id)
3892 {
3893  const u16 *old_id = dev->id;
3894  unsigned char model[2][ATA_ID_PROD_LEN + 1];
3895  unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3896 
3897  if (dev->class != new_class) {
3898  ata_dev_info(dev, "class mismatch %d != %d\n",
3899  dev->class, new_class);
3900  return 0;
3901  }
3902 
3903  ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3904  ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3905  ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3906  ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3907 
3908  if (strcmp(model[0], model[1])) {
3909  ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3910  model[0], model[1]);
3911  return 0;
3912  }
3913 
3914  if (strcmp(serial[0], serial[1])) {
3915  ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3916  serial[0], serial[1]);
3917  return 0;
3918  }
3919 
3920  return 1;
3921 }
3922 
3937 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3938 {
3939  unsigned int class = dev->class;
3940  u16 *id = (void *)dev->link->ap->sector_buf;
3941  int rc;
3942 
3943  /* read ID data */
3944  rc = ata_dev_read_id(dev, &class, readid_flags, id);
3945  if (rc)
3946  return rc;
3947 
3948  /* is the device still there? */
3949  if (!ata_dev_same_device(dev, class, id))
3950  return -ENODEV;
3951 
3952  memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3953  return 0;
3954 }
3955 
3971 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3972  unsigned int readid_flags)
3973 {
3974  u64 n_sectors = dev->n_sectors;
3975  u64 n_native_sectors = dev->n_native_sectors;
3976  int rc;
3977 
3978  if (!ata_dev_enabled(dev))
3979  return -ENODEV;
3980 
3981  /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3982  if (ata_class_enabled(new_class) &&
3983  new_class != ATA_DEV_ATA &&
3984  new_class != ATA_DEV_ATAPI &&
3985  new_class != ATA_DEV_SEMB) {
3986  ata_dev_info(dev, "class mismatch %u != %u\n",
3987  dev->class, new_class);
3988  rc = -ENODEV;
3989  goto fail;
3990  }
3991 
3992  /* re-read ID */
3993  rc = ata_dev_reread_id(dev, readid_flags);
3994  if (rc)
3995  goto fail;
3996 
3997  /* configure device according to the new ID */
3998  rc = ata_dev_configure(dev);
3999  if (rc)
4000  goto fail;
4001 
4002  /* verify n_sectors hasn't changed */
4003  if (dev->class != ATA_DEV_ATA || !n_sectors ||
4004  dev->n_sectors == n_sectors)
4005  return 0;
4006 
4007  /* n_sectors has changed */
4008  ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4009  (unsigned long long)n_sectors,
4010  (unsigned long long)dev->n_sectors);
4011 
4012  /*
4013  * Something could have caused HPA to be unlocked
4014  * involuntarily. If n_native_sectors hasn't changed and the
4015  * new size matches it, keep the device.
4016  */
4017  if (dev->n_native_sectors == n_native_sectors &&
4018  dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4019  ata_dev_warn(dev,
4020  "new n_sectors matches native, probably "
4021  "late HPA unlock, n_sectors updated\n");
4022  /* use the larger n_sectors */
4023  return 0;
4024  }
4025 
4026  /*
4027  * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4028  * unlocking HPA in those cases.
4029  *
4030  * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4031  */
4032  if (dev->n_native_sectors == n_native_sectors &&
4033  dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4034  !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4035  ata_dev_warn(dev,
4036  "old n_sectors matches native, probably "
4037  "late HPA lock, will try to unlock HPA\n");
4038  /* try unlocking HPA */
4039  dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4040  rc = -EIO;
4041  } else
4042  rc = -ENODEV;
4043 
4044  /* restore original n_[native_]sectors and fail */
4045  dev->n_native_sectors = n_native_sectors;
4046  dev->n_sectors = n_sectors;
4047  fail:
4048  ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4049  return rc;
4050 }
4051 
4052 struct ata_blacklist_entry {
4053  const char *model_num;
4054  const char *model_rev;
4055  unsigned long horkage;
4056 };
4057 
4058 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4059  /* Devices with DMA related problems under Linux */
4060  { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4061  { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4062  { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4063  { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4064  { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4065  { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4066  { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4067  { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4068  { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4069  { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4070  { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4071  { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4072  { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4073  { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4074  { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4075  { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4076  { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4077  { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4078  { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4079  { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4080  { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4081  { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4082  { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4083  { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4084  { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4085  { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4086  { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4087  { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4088  { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4089  /* Odd clown on sil3726/4726 PMPs */
4090  { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4091 
4092  /* Weird ATAPI devices */
4093  { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4094  { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4095 
4096  /* Devices we expect to fail diagnostics */
4097 
4098  /* Devices where NCQ should be avoided */
4099  /* NCQ is slow */
4100  { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4101  { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4102  /* http://thread.gmane.org/gmane.linux.ide/14907 */
4103  { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4104  /* NCQ is broken */
4105  { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4106  { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4107  { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4108  { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4109  { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4110 
4111  /* Seagate NCQ + FLUSH CACHE firmware bug */
4112  { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4113  ATA_HORKAGE_FIRMWARE_WARN },
4114 
4115  { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4116  ATA_HORKAGE_FIRMWARE_WARN },
4117 
4118  { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4119  ATA_HORKAGE_FIRMWARE_WARN },
4120 
4121  { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4122  ATA_HORKAGE_FIRMWARE_WARN },
4123 
4124  /* Blacklist entries taken from Silicon Image 3124/3132
4125  Windows driver .inf file - also several Linux problem reports */
4126  { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4127  { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4128  { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4129 
4130  /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4131  { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4132 
4133  /* devices which puke on READ_NATIVE_MAX */
4134  { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4135  { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4136  { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4137  { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4138 
4139  /* this one allows HPA unlocking but fails IOs on the area */
4140  { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4141 
4142  /* Devices which report 1 sector over size HPA */
4143  { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4144  { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4145  { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4146 
4147  /* Devices which get the IVB wrong */
4148  { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4149  /* Maybe we should just blacklist TSSTcorp... */
4150  { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4151 
4152  /* Devices that do not need bridging limits applied */
4153  { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4154  { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4155 
4156  /* Devices which aren't very happy with higher link speeds */
4157  { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4158  { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4159 
4160  /*
4161  * Devices which choke on SETXFER. Applies only if both the
4162  * device and controller are SATA.
4163  */
4164  { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4165  { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4166  { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4167  { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4168  { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4169 
4170  /* End Marker */
4171  { }
4172 };
4173 
4201 static int glob_match (const char *text, const char *pattern)
4202 {
4203  do {
4204  /* Match single character or a '?' wildcard */
4205  if (*text == *pattern || *pattern == '?') {
4206  if (!*pattern++)
4207  return 0; /* End of both strings: match */
4208  } else {
4209  /* Match single char against a '[' bracketed ']' pattern set */
4210  if (!*text || *pattern != '[')
4211  break; /* Not a pattern set */
4212  while (*++pattern && *pattern != ']' && *text != *pattern) {
4213  if (*pattern == '-' && *(pattern - 1) != '[')
4214  if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4215  ++pattern;
4216  break;
4217  }
4218  }
4219  if (!*pattern || *pattern == ']')
4220  return 1; /* No match */
4221  while (*pattern && *pattern++ != ']');
4222  }
4223  } while (*++text && *pattern);
4224 
4225  /* Match any run of chars against a '*' wildcard */
4226  if (*pattern == '*') {
4227  if (!*++pattern)
4228  return 0; /* Match: avoid recursion at end of pattern */
4229  /* Loop to handle additional pattern chars after the wildcard */
4230  while (*text) {
4231  if (glob_match(text, pattern) == 0)
4232  return 0; /* Remainder matched */
4233  ++text; /* Absorb (match) this char and try again */
4234  }
4235  }
4236  if (!*text && !*pattern)
4237  return 0; /* End of both strings: match */
4238  return 1; /* No match */
4239 }
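/*
 * Usage sketch (same translation unit assumed, since glob_match() is
 * static). Note the strcmp()-like convention: 0 means "matched".
 */
static void glob_match_demo(void)
{
	WARN_ON(glob_match("Maxtor 7V300F0", "Maxtor *") != 0);	/* '*' run */
	WARN_ON(glob_match("CRD-8480B", "CRD-848[02]B") != 0);	/* '[ ]' set */
	WARN_ON(glob_match("ST380817AS", "Maxtor *") == 0);	/* no match */
}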
4240 
4241 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4242 {
4243  unsigned char model_num[ATA_ID_PROD_LEN + 1];
4244  unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4245  const struct ata_blacklist_entry *ad = ata_device_blacklist;
4246 
4247  ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4248  ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4249 
4250  while (ad->model_num) {
4251  if (!glob_match(model_num, ad->model_num)) {
4252  if (ad->model_rev == NULL)
4253  return ad->horkage;
4254  if (!glob_match(model_rev, ad->model_rev))
4255  return ad->horkage;
4256  }
4257  ad++;
4258  }
4259  return 0;
4260 }
4261 
4262 static int ata_dma_blacklisted(const struct ata_device *dev)
4263 {
4264  /* We don't support polling DMA.
4265  * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4266  * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4267  */
4268  if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4269  (dev->flags & ATA_DFLAG_CDB_INTR))
4270  return 1;
4271  return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4272 }
4273 
4282 static int ata_is_40wire(struct ata_device *dev)
4283 {
4284  if (dev->horkage & ATA_HORKAGE_IVB)
4285  return ata_drive_40wire_relaxed(dev->id);
4286  return ata_drive_40wire(dev->id);
4287 }
4288 
4302 static int cable_is_40wire(struct ata_port *ap)
4303 {
4304  struct ata_link *link;
4305  struct ata_device *dev;
4306 
4307  /* If the controller thinks we are 40 wire, we are. */
4308  if (ap->cbl == ATA_CBL_PATA40)
4309  return 1;
4310 
4311  /* If the controller thinks we are 80 wire, we are. */
4312  if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4313  return 0;
4314 
4315  /* If the system is known to be 40 wire short cable (eg
4316  * laptop), then we allow 80 wire modes even if the drive
4317  * isn't sure.
4318  */
4319  if (ap->cbl == ATA_CBL_PATA40_SHORT)
4320  return 0;
4321 
4322  /* If the controller doesn't know, we scan.
4323  *
4324  * Note: We look for all 40 wire detects at this point. Any
4325  * 80 wire detect is taken to be 80 wire cable because
4326  * - in many setups only the one drive (slave if present) will
4327  * give a valid detect
4328  * - if you have a non detect capable drive you don't want it
4329  * to colour the choice
4330  */
4331  ata_for_each_link(link, ap, EDGE) {
4332  ata_for_each_dev(dev, link, ENABLED) {
4333  if (!ata_is_40wire(dev))
4334  return 0;
4335  }
4336  }
4337  return 1;
4338 }
4339 
4352 static void ata_dev_xfermask(struct ata_device *dev)
4353 {
4354  struct ata_link *link = dev->link;
4355  struct ata_port *ap = link->ap;
4356  struct ata_host *host = ap->host;
4357  unsigned long xfer_mask;
4358 
4359  /* controller modes available */
4360  xfer_mask = ata_pack_xfermask(ap->pio_mask,
4361  ap->mwdma_mask, ap->udma_mask);
4362 
4363  /* drive modes available */
4364  xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4365  dev->mwdma_mask, dev->udma_mask);
4366  xfer_mask &= ata_id_xfermask(dev->id);
4367 
4368  /*
4369  * CFA Advanced TrueIDE timings are not allowed on a shared
4370  * cable
4371  */
4372  if (ata_dev_pair(dev)) {
4373  /* No PIO5 or PIO6 */
4374  xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4375  /* No MWDMA3 or MWDMA 4 */
4376  xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4377  }
4378 
4379  if (ata_dma_blacklisted(dev)) {
4380  xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4381  ata_dev_warn(dev,
4382  "device is on DMA blacklist, disabling DMA\n");
4383  }
4384 
4385  if ((host->flags & ATA_HOST_SIMPLEX) &&
4386  host->simplex_claimed && host->simplex_claimed != ap) {
4387  xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4388  ata_dev_warn(dev,
4389  "simplex DMA is claimed by other device, disabling DMA\n");
4390  }
4391 
4392  if (ap->flags & ATA_FLAG_NO_IORDY)
4393  xfer_mask &= ata_pio_mask_no_iordy(dev);
4394 
4395  if (ap->ops->mode_filter)
4396  xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4397 
4398  /* Apply cable rule here. Don't apply it early because when
4399  * we handle hot plug the cable type can itself change.
4400  * Check this last so that we know if the transfer rate was
4401  * solely limited by the cable.
4402  * Unknown or 80 wire cables reported host side are checked
4403  * drive side as well. Cases where we know a 40wire cable
4404  * is used safely for 80 are not checked here.
4405  */
4406  if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4407  /* UDMA/44 or higher would be available */
4408  if (cable_is_40wire(ap)) {
4409  ata_dev_warn(dev,
4410  "limited to UDMA/33 due to 40-wire cable\n");
4411  xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4412  }
4413 
4414  ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4415  &dev->mwdma_mask, &dev->udma_mask);
4416 }
4417 
4432 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4433 {
4434  struct ata_taskfile tf;
4435  unsigned int err_mask;
4436 
4437  /* set up set-features taskfile */
4438  DPRINTK("set features - xfer mode\n");
4439 
4440  /* Some controllers and ATAPI devices show flaky interrupt
4441  * behavior after setting xfer mode. Use polling instead.
4442  */
4443  ata_tf_init(dev, &tf);
4444  tf.command = ATA_CMD_SET_FEATURES;
4445  tf.feature = SETFEATURES_XFER;
4446  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4447  tf.protocol = ATA_PROT_NODATA;
4448  /* If we are using IORDY we must send the mode setting command */
4449  if (ata_pio_need_iordy(dev))
4450  tf.nsect = dev->xfer_mode;
4451  /* If the device has IORDY and the controller does not - turn it off */
4452  else if (ata_id_has_iordy(dev->id))
4453  tf.nsect = 0x01;
4454  else /* In the ancient relic department - skip all of this */
4455  return 0;
4456 
4457  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4458 
4459  DPRINTK("EXIT, err_mask=%x\n", err_mask);
4460  return err_mask;
4461 }
4462 
4478 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4479 {
4480  struct ata_taskfile tf;
4481  unsigned int err_mask;
4482 
4483  /* set up set-features taskfile */
4484  DPRINTK("set features - SATA features\n");
4485 
4486  ata_tf_init(dev, &tf);
4487  tf.command = ATA_CMD_SET_FEATURES;
4488  tf.feature = enable;
4489  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4490  tf.protocol = ATA_PROT_NODATA;
4491  tf.nsect = feature;
4492 
4493  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4494 
4495  DPRINTK("EXIT, err_mask=%x\n", err_mask);
4496  return err_mask;
4497 }
4499 
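/*
 * Usage sketch (assumption: mirrors how libata enables SATA
 * Asynchronous Notification on ATAPI devices; the constants come from
 * <linux/ata.h>, example_enable_an() itself is hypothetical):
 */
static unsigned int example_enable_an(struct ata_device *dev)
{
	return ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
}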
4512 static unsigned int ata_dev_init_params(struct ata_device *dev,
4513  u16 heads, u16 sectors)
4514 {
4515  struct ata_taskfile tf;
4516  unsigned int err_mask;
4517 
4518  /* Number of sectors per track 1-255. Number of heads 1-16 */
4519  if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4520  return AC_ERR_INVALID;
4521 
4522  /* set up init dev params taskfile */
4523  DPRINTK("init dev params \n");
4524 
4525  ata_tf_init(dev, &tf);
4526  tf.command = ATA_CMD_INIT_DEV_PARAMS;
4527  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4528  tf.protocol = ATA_PROT_NODATA;
4529  tf.nsect = sectors;
4530  tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4531 
4532  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4533  /* A clean abort indicates an original or just out of spec drive
4534  and we should continue as we issue the setup based on the
4535  drive reported working geometry */
4536  if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4537  err_mask = 0;
4538 
4539  DPRINTK("EXIT, err_mask=%x\n", err_mask);
4540  return err_mask;
4541 }
4542 
4552 void ata_sg_clean(struct ata_queued_cmd *qc)
4553 {
4554  struct ata_port *ap = qc->ap;
4555  struct scatterlist *sg = qc->sg;
4556  int dir = qc->dma_dir;
4557 
4558  WARN_ON_ONCE(sg == NULL);
4559 
4560  VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4561 
4562  if (qc->n_elem)
4563  dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4564 
4565  qc->flags &= ~ATA_QCFLAG_DMAMAP;
4566  qc->sg = NULL;
4567 }
4568 
4583 int atapi_check_dma(struct ata_queued_cmd *qc)
4584 {
4585  struct ata_port *ap = qc->ap;
4586 
4587  /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4588  * few ATAPI devices choke on such DMA requests.
4589  */
4590  if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4591  unlikely(qc->nbytes & 15))
4592  return 1;
4593 
4594  if (ap->ops->check_atapi_dma)
4595  return ap->ops->check_atapi_dma(qc);
4596 
4597  return 0;
4598 }
4599 
4615 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4616 {
4617  struct ata_link *link = qc->dev->link;
4618 
4619  if (qc->tf.protocol == ATA_PROT_NCQ) {
4620  if (!ata_tag_valid(link->active_tag))
4621  return 0;
4622  } else {
4623  if (!ata_tag_valid(link->active_tag) && !link->sactive)
4624  return 0;
4625  }
4626 
4627  return ATA_DEFER_LINK;
4628 }
4629 
4630 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4631 
4645 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4646  unsigned int n_elem)
4647 {
4648  qc->sg = sg;
4649  qc->n_elem = n_elem;
4650  qc->cursg = qc->sg;
4651 }
4652 
4666 static int ata_sg_setup(struct ata_queued_cmd *qc)
4667 {
4668  struct ata_port *ap = qc->ap;
4669  unsigned int n_elem;
4670 
4671  VPRINTK("ENTER, ata%u\n", ap->print_id);
4672 
4673  n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4674  if (n_elem < 1)
4675  return -1;
4676 
4677  DPRINTK("%d sg elements mapped\n", n_elem);
4678  qc->orig_n_elem = qc->n_elem;
4679  qc->n_elem = n_elem;
4680  qc->flags |= ATA_QCFLAG_DMAMAP;
4681 
4682  return 0;
4683 }
4684 
4697 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4698 {
4699 #ifdef __BIG_ENDIAN
4700  unsigned int i;
4701 
4702  for (i = 0; i < buf_words; i++)
4703  buf[i] = le16_to_cpu(buf[i]);
4704 #endif /* __BIG_ENDIAN */
4705 }
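/*
 * Illustration (assumed context): ATA IDENTIFY data is a 256-word
 * little-endian buffer, so code that reads it with raw MMIO on a
 * big-endian machine swaps it once before indexing the u16 words:
 */
static void example_fixup_id(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* no-op on little-endian */
}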
4706 
4715 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4716 {
4717  struct ata_queued_cmd *qc = NULL;
4718  unsigned int i;
4719 
4720  /* no command while frozen */
4721  if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4722  return NULL;
4723 
4724  /* the last tag is reserved for internal command. */
4725  for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4726  if (!test_and_set_bit(i, &ap->qc_allocated)) {
4727  qc = __ata_qc_from_tag(ap, i);
4728  break;
4729  }
4730 
4731  if (qc)
4732  qc->tag = i;
4733 
4734  return qc;
4735 }
4736 
4745 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4746 {
4747  struct ata_port *ap = dev->link->ap;
4748  struct ata_queued_cmd *qc;
4749 
4750  qc = ata_qc_new(ap);
4751  if (qc) {
4752  qc->scsicmd = NULL;
4753  qc->ap = ap;
4754  qc->dev = dev;
4755 
4756  ata_qc_reinit(qc);
4757  }
4758 
4759  return qc;
4760 }
4761 
4772 void ata_qc_free(struct ata_queued_cmd *qc)
4773 {
4774  struct ata_port *ap;
4775  unsigned int tag;
4776 
4777  WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4778  ap = qc->ap;
4779 
4780  qc->flags = 0;
4781  tag = qc->tag;
4782  if (likely(ata_tag_valid(tag))) {
4783  qc->tag = ATA_TAG_POISON;
4784  clear_bit(tag, &ap->qc_allocated);
4785  }
4786 }
4787 
4788 void __ata_qc_complete(struct ata_queued_cmd *qc)
4789 {
4790  struct ata_port *ap;
4791  struct ata_link *link;
4792 
4793  WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4794  WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4795  ap = qc->ap;
4796  link = qc->dev->link;
4797 
4798  if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4799  ata_sg_clean(qc);
4800 
4801  /* command should be marked inactive atomically with qc completion */
4802  if (qc->tf.protocol == ATA_PROT_NCQ) {
4803  link->sactive &= ~(1 << qc->tag);
4804  if (!link->sactive)
4805  ap->nr_active_links--;
4806  } else {
4807  link->active_tag = ATA_TAG_POISON;
4808  ap->nr_active_links--;
4809  }
4810 
4811  /* clear exclusive status */
4812  if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4813  ap->excl_link == link))
4814  ap->excl_link = NULL;
4815 
4816  /* atapi: mark qc as inactive to prevent the interrupt handler
4817  * from completing the command twice later, before the error handler
4818  * is called. (when rc != 0 and atapi request sense is needed)
4819  */
4820  qc->flags &= ~ATA_QCFLAG_ACTIVE;
4821  ap->qc_active &= ~(1 << qc->tag);
4822 
4823  /* call completion callback */
4824  qc->complete_fn(qc);
4825 }
4826 
4827 static void fill_result_tf(struct ata_queued_cmd *qc)
4828 {
4829  struct ata_port *ap = qc->ap;
4830 
4831  qc->result_tf.flags = qc->tf.flags;
4832  ap->ops->qc_fill_rtf(qc);
4833 }
4834 
4835 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4836 {
4837  struct ata_device *dev = qc->dev;
4838 
4839  if (ata_is_nodata(qc->tf.protocol))
4840  return;
4841 
4842  if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4843  return;
4844 
4845  dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4846 }
4847 
4863 void ata_qc_complete(struct ata_queued_cmd *qc)
4864 {
4865  struct ata_port *ap = qc->ap;
4866 
4867  /* XXX: New EH and old EH use different mechanisms to
4868  * synchronize EH with regular execution path.
4869  *
4870  * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4871  * Normal execution path is responsible for not accessing a
4872  * failed qc. libata core enforces the rule by returning NULL
4873  * from ata_qc_from_tag() for failed qcs.
4874  *
4875  * Old EH depends on ata_qc_complete() nullifying completion
4876  * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4877  * not synchronize with interrupt handler. Only PIO task is
4878  * taken care of.
4879  */
4880  if (ap->ops->error_handler) {
4881  struct ata_device *dev = qc->dev;
4882  struct ata_eh_info *ehi = &dev->link->eh_info;
4883 
4884  if (unlikely(qc->err_mask))
4885  qc->flags |= ATA_QCFLAG_FAILED;
4886 
4887  /*
4888  * Finish internal commands without any further processing
4889  * and always with the result TF filled.
4890  */
4891  if (unlikely(ata_tag_internal(qc->tag))) {
4892  fill_result_tf(qc);
4893  __ata_qc_complete(qc);
4894  return;
4895  }
4896 
4897  /*
4898  * Non-internal qc has failed. Fill the result TF and
4899  * summon EH.
4900  */
4901  if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4902  fill_result_tf(qc);
4903  ata_qc_schedule_eh(qc);
4904  return;
4905  }
4906 
4907  WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4908 
4909  /* read result TF if requested */
4910  if (qc->flags & ATA_QCFLAG_RESULT_TF)
4911  fill_result_tf(qc);
4912 
4913  /* Some commands need post-processing after successful
4914  * completion.
4915  */
4916  switch (qc->tf.command) {
4917  case ATA_CMD_SET_FEATURES:
4918  if (qc->tf.feature != SETFEATURES_WC_ON &&
4919  qc->tf.feature != SETFEATURES_WC_OFF)
4920  break;
4921  /* fall through */
4922  case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4923  case ATA_CMD_SET_MULTI: /* multi_count changed */
4924  /* revalidate device */
4925  ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4926  ata_port_schedule_eh(ap);
4927  break;
4928 
4929  case ATA_CMD_SLEEP:
4930  dev->flags |= ATA_DFLAG_SLEEPING;
4931  break;
4932  }
4933 
4933 
4934  if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4935  ata_verify_xfer(qc);
4936 
4937  __ata_qc_complete(qc);
4938  } else {
4939  if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4940  return;
4941 
4942  /* read result TF if failed or requested */
4943  if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4944  fill_result_tf(qc);
4945 
4946  __ata_qc_complete(qc);
4947  }
4948 }
4949 
4970 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4971 {
4972  int nr_done = 0;
4973  u32 done_mask;
4974 
4975  done_mask = ap->qc_active ^ qc_active;
4976 
4977  if (unlikely(done_mask & qc_active)) {
4978  ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
4979  ap->qc_active, qc_active);
4980  return -EINVAL;
4981  }
4982 
4983  while (done_mask) {
4984  struct ata_queued_cmd *qc;
4985  unsigned int tag = __ffs(done_mask);
4986 
4987  qc = ata_qc_from_tag(ap, tag);
4988  if (qc) {
4989  ata_qc_complete(qc);
4990  nr_done++;
4991  }
4992  done_mask &= ~(1 << tag);
4993  }
4994 
4995  return nr_done;
4996 }
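/*
 * Usage sketch (assumption: modeled on NCQ-capable LLDs such as ahci):
 * the interrupt handler reads the mask of tags still active in the
 * controller and passes it in; the helper completes every qc that has
 * dropped out of that mask since the last call.
 */
static void example_host_intr(struct ata_port *ap, void __iomem *port_mmio)
{
	u32 still_active = readl(port_mmio);	/* e.g. AHCI PxSACT/PxCI */

	ata_qc_complete_multiple(ap, still_active);
}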
4997 
5010 void ata_qc_issue(struct ata_queued_cmd *qc)
5011 {
5012  struct ata_port *ap = qc->ap;
5013  struct ata_link *link = qc->dev->link;
5014  u8 prot = qc->tf.protocol;
5015 
5016  /* Make sure only one non-NCQ command is outstanding. The
5017  * check is skipped for old EH because it reuses active qc to
5018  * request ATAPI sense.
5019  */
5020  WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5021 
5022  if (ata_is_ncq(prot)) {
5023  WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5024 
5025  if (!link->sactive)
5026  ap->nr_active_links++;
5027  link->sactive |= 1 << qc->tag;
5028  } else {
5029  WARN_ON_ONCE(link->sactive);
5030 
5031  ap->nr_active_links++;
5032  link->active_tag = qc->tag;
5033  }
5034 
5035  qc->flags |= ATA_QCFLAG_ACTIVE;
5036  ap->qc_active |= 1 << qc->tag;
5037 
5038  /*
5039  * We guarantee to LLDs that they will have at least one
5040  * non-zero sg if the command is a data command.
5041  */
5042  if (WARN_ON_ONCE(ata_is_data(prot) &&
5043  (!qc->sg || !qc->n_elem || !qc->nbytes)))
5044  goto sys_err;
5045 
5046  if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5047  (ap->flags & ATA_FLAG_PIO_DMA)))
5048  if (ata_sg_setup(qc))
5049  goto sys_err;
5050 
5051  /* if device is sleeping, schedule reset and abort the link */
5052  if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5053  link->eh_info.action |= ATA_EH_RESET;
5054  ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5055  ata_link_abort(link);
5056  return;
5057  }
5058 
5059  ap->ops->qc_prep(qc);
5060 
5061  qc->err_mask |= ap->ops->qc_issue(qc);
5062  if (unlikely(qc->err_mask))
5063  goto err;
5064  return;
5065 
5066 sys_err:
5067  qc->err_mask |= AC_ERR_SYSTEM;
5068 err:
5069  ata_qc_complete(qc);
5070 }
5071 
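The tag bookkeeping above is plain bit arithmetic, and it is exactly what ata_qc_complete_multiple() later inverts. A worked example (values made up, not from this file):

/*
 * issue NCQ tag 0:  link->sactive = 0x01, ap->qc_active = 0x01
 * issue NCQ tag 5:  link->sactive = 0x21, ap->qc_active = 0x21
 * controller retires tag 5 and now reports qc_active = 0x01:
 *	done_mask = 0x21 ^ 0x01 = 0x20  =>  only tag 5 is completed
 */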
5084 int sata_scr_valid(struct ata_link *link)
5085 {
5086  struct ata_port *ap = link->ap;
5087 
5088  return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5089 }
5090 
5107 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5108 {
5109  if (ata_is_host_link(link)) {
5110  if (sata_scr_valid(link))
5111  return link->ap->ops->scr_read(link, reg, val);
5112  return -EOPNOTSUPP;
5113  }
5114 
5115  return sata_pmp_scr_read(link, reg, val);
5116 }
5117 
5134 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5135 {
5136  if (ata_is_host_link(link)) {
5137  if (sata_scr_valid(link))
5138  return link->ap->ops->scr_write(link, reg, val);
5139  return -EOPNOTSUPP;
5140  }
5141 
5142  return sata_pmp_scr_write(link, reg, val);
5143 }
5144 
5160 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5161 {
5162  if (ata_is_host_link(link)) {
5163  int rc;
5164 
5165  if (sata_scr_valid(link)) {
5166  rc = link->ap->ops->scr_write(link, reg, val);
5167  if (rc == 0)
5168  rc = link->ap->ops->scr_read(link, reg, &val);
5169  return rc;
5170  }
5171  return -EOPNOTSUPP;
5172  }
5173 
5174  return sata_pmp_scr_write(link, reg, val);
5175 }
5176 
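The sata_scr_*() accessors reach the SStatus/SControl/SError registers whether the link hangs off the host or a port multiplier. A small sketch that decodes the DET field of SStatus (bits 3:0; 0x3 means device present with phy communication established, the same condition ata_sstatus_online() tests); my_link_has_phy() is a hypothetical helper:

#include <linux/libata.h>

static bool my_link_has_phy(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return false;		/* no SCR access on this link */

	return (sstatus & 0xf) == 0x3;	/* DET: device present, phy online */
}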
5191 bool ata_phys_link_online(struct ata_link *link)
5192 {
5193  u32 sstatus;
5194 
5195  if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5196  ata_sstatus_online(sstatus))
5197  return true;
5198  return false;
5199 }
5200 
5215 bool ata_phys_link_offline(struct ata_link *link)
5216 {
5217  u32 sstatus;
5218 
5219  if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5220  !ata_sstatus_online(sstatus))
5221  return true;
5222  return false;
5223 }
5224 
5241 bool ata_link_online(struct ata_link *link)
5242 {
5243  struct ata_link *slave = link->ap->slave_link;
5244 
5245  WARN_ON(link == slave); /* shouldn't be called on slave link */
5246 
5247  return ata_phys_link_online(link) ||
5248  (slave && ata_phys_link_online(slave));
5249 }
5250 
5267 bool ata_link_offline(struct ata_link *link)
5268 {
5269  struct ata_link *slave = link->ap->slave_link;
5270 
5271  WARN_ON(link == slave); /* shouldn't be called on slave link */
5272 
5273  return ata_phys_link_offline(link) &&
5274  (!slave || ata_phys_link_offline(slave));
5275 }
5276 
5277 #ifdef CONFIG_PM
5278 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5279  unsigned int action, unsigned int ehi_flags,
5280  int *async)
5281 {
5282  struct ata_link *link;
5283  unsigned long flags;
5284  int rc = 0;
5285 
5286  /* Previous resume operation might still be in
5287  * progress. Wait for PM_PENDING to clear.
5288  */
5289  if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5290  if (async) {
5291  *async = -EAGAIN;
5292  return 0;
5293  }
5294  ata_port_wait_eh(ap);
5295  WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5296  }
5297 
5298  /* request PM ops to EH */
5299  spin_lock_irqsave(ap->lock, flags);
5300 
5301  ap->pm_mesg = mesg;
5302  if (async)
5303  ap->pm_result = async;
5304  else
5305  ap->pm_result = &rc;
5306 
5307  ap->pflags |= ATA_PFLAG_PM_PENDING;
5308  ata_for_each_link(link, ap, HOST_FIRST) {
5309  link->eh_info.action |= action;
5310  link->eh_info.flags |= ehi_flags;
5311  }
5312 
5313  ata_port_schedule_eh(ap);
5314 
5315  spin_unlock_irqrestore(ap->lock, flags);
5316 
5317  /* wait and check result */
5318  if (!async) {
5319  ata_port_wait_eh(ap);
5320  WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5321  }
5322 
5323  return rc;
5324 }
5325 
5326 static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
5327 {
5328  unsigned int ehi_flags = ATA_EHI_QUIET;
5329  int rc;
5330 
5331  /*
5332  * On some hardware, a device fails to respond after being spun
5333  * down for suspend. As the device won't be used before being
5334  * resumed, we don't need to touch it. Ask EH to skip the usual
5335  * stuff and proceed directly to suspend.
5336  *
5337  * http://thread.gmane.org/gmane.linux.ide/46764
5338  */
5339  if (mesg.event == PM_EVENT_SUSPEND)
5340  ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5341 
5342  rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
5343  return rc;
5344 }
5345 
5346 static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5347 {
5348  struct ata_port *ap = to_ata_port(dev);
5349 
5350  return __ata_port_suspend_common(ap, mesg, NULL);
5351 }
5352 
5353 static int ata_port_suspend(struct device *dev)
5354 {
5355  if (pm_runtime_suspended(dev))
5356  return 0;
5357 
5358  return ata_port_suspend_common(dev, PMSG_SUSPEND);
5359 }
5360 
5361 static int ata_port_do_freeze(struct device *dev)
5362 {
5363  if (pm_runtime_suspended(dev))
5364  pm_runtime_resume(dev);
5365 
5366  return ata_port_suspend_common(dev, PMSG_FREEZE);
5367 }
5368 
5369 static int ata_port_poweroff(struct device *dev)
5370 {
5371  if (pm_runtime_suspended(dev))
5372  return 0;
5373 
5374  return ata_port_suspend_common(dev, PMSG_HIBERNATE);
5375 }
5376 
5377 static int __ata_port_resume_common(struct ata_port *ap, int *async)
5378 {
5379  int rc;
5380 
5381  rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
5382  ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
5383  return rc;
5384 }
5385 
5386 static int ata_port_resume_common(struct device *dev)
5387 {
5388  struct ata_port *ap = to_ata_port(dev);
5389 
5390  return __ata_port_resume_common(ap, NULL);
5391 }
5392 
5393 static int ata_port_resume(struct device *dev)
5394 {
5395  int rc;
5396 
5397  rc = ata_port_resume_common(dev);
5398  if (!rc) {
5399  pm_runtime_disable(dev);
5400  pm_runtime_set_active(dev);
5401  pm_runtime_enable(dev);
5402  }
5403 
5404  return rc;
5405 }
5406 
5407 static int ata_port_runtime_idle(struct device *dev)
5408 {
5409  return pm_runtime_suspend(dev);
5410 }
5411 
5412 static const struct dev_pm_ops ata_port_pm_ops = {
5413  .suspend = ata_port_suspend,
5414  .resume = ata_port_resume,
5415  .freeze = ata_port_do_freeze,
5416  .thaw = ata_port_resume,
5417  .poweroff = ata_port_poweroff,
5418  .restore = ata_port_resume,
5419 
5420  .runtime_suspend = ata_port_suspend,
5421  .runtime_resume = ata_port_resume_common,
5422  .runtime_idle = ata_port_runtime_idle,
5423 };
5424 
5425 /* sas ports don't participate in pm runtime management of ata_ports,
5426  * and need to resume ata devices at the domain level, not the per-port
5427  * level. sas suspend/resume is async to allow parallel port recovery
5428  * since sas has multiple ata_port instances per Scsi_Host.
5429  */
5430 int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
5431 {
5432  return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
5433 }
5434 EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
5435 
5436 int ata_sas_port_async_resume(struct ata_port *ap, int *async)
5437 {
5438  return __ata_port_resume_common(ap, async);
5439 }
5440 EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
5441 
5442 
5450 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5451 {
5452  host->dev->power.power_state = mesg;
5453  return 0;
5454 }
5455 
5462 void ata_host_resume(struct ata_host *host)
5463 {
5464  host->dev->power.power_state = PMSG_ON;
5465 }
5466 #endif
5467 
5468 struct device_type ata_port_type = {
5469  .name = "ata_port",
5470 #ifdef CONFIG_PM
5471  .pm = &ata_port_pm_ops,
5472 #endif
5473 };
5474 
5484 void ata_dev_init(struct ata_device *dev)
5485 {
5486  struct ata_link *link = ata_dev_phys_link(dev);
5487  struct ata_port *ap = link->ap;
5488  unsigned long flags;
5489 
5490  /* SATA spd limit is bound to the attached device, reset together */
5491  link->sata_spd_limit = link->hw_sata_spd_limit;
5492  link->sata_spd = 0;
5493 
5494  /* High bits of dev->flags are used to record warm plug
5495  * requests which occur asynchronously. Synchronize using
5496  * host lock.
5497  */
5498  spin_lock_irqsave(ap->lock, flags);
5499  dev->flags &= ~ATA_DFLAG_INIT_MASK;
5500  dev->horkage = 0;
5501  spin_unlock_irqrestore(ap->lock, flags);
5502 
5503  memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5504  ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5505  dev->pio_mask = UINT_MAX;
5506  dev->mwdma_mask = UINT_MAX;
5507  dev->udma_mask = UINT_MAX;
5508 }
5509 
5521 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5522 {
5523  int i;
5524 
5525  /* clear everything except for devices */
5526  memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5527  ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5528 
5529  link->ap = ap;
5530  link->pmp = pmp;
5531  link->active_tag = ATA_TAG_POISON;
5532  link->hw_sata_spd_limit = UINT_MAX;
5533 
5534  /* can't use iterator, ap isn't initialized yet */
5535  for (i = 0; i < ATA_MAX_DEVICES; i++) {
5536  struct ata_device *dev = &link->device[i];
5537 
5538  dev->link = link;
5539  dev->devno = dev - link->device;
5540 #ifdef CONFIG_ATA_ACPI
5541  dev->gtf_filter = ata_acpi_gtf_filter;
5542 #endif
5543  ata_dev_init(dev);
5544  }
5545 }
5546 
5560 int sata_link_init_spd(struct ata_link *link)
5561 {
5562  u8 spd;
5563  int rc;
5564 
5565  rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5566  if (rc)
5567  return rc;
5568 
5569  spd = (link->saved_scontrol >> 4) & 0xf;
5570  if (spd)
5571  link->hw_sata_spd_limit &= (1 << spd) - 1;
5572 
5573  ata_force_link_limits(link);
5574 
5575  link->sata_spd_limit = link->hw_sata_spd_limit;
5576 
5577  return 0;
5578 }
5579 
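The shift-and-mask above reads the SPD field (bits 7:4) of SControl, and the resulting limit bitmap uses bit (gen - 1) per generation. A worked example (values made up):

/*
 * saved_scontrol = 0x0920  =>  spd = (0x0920 >> 4) & 0xf = 2
 * hw_sata_spd_limit &= (1 << 2) - 1 = 0x3
 * bit 0 (1.5 Gbps) and bit 1 (3.0 Gbps) stay allowed; 6.0 Gbps is masked off.
 */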
5592 struct ata_port *ata_port_alloc(struct ata_host *host)
5593 {
5594  struct ata_port *ap;
5595 
5596  DPRINTK("ENTER\n");
5597 
5598  ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5599  if (!ap)
5600  return NULL;
5601 
5602  ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5603  ap->lock = &host->lock;
5604  ap->print_id = -1;
5605  ap->host = host;
5606  ap->dev = host->dev;
5607 
5608 #if defined(ATA_VERBOSE_DEBUG)
5609  /* turn on all debugging levels */
5610  ap->msg_enable = 0x00FF;
5611 #elif defined(ATA_DEBUG)
5612  ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5613 #else
5614  ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5615 #endif
5616 
5617  mutex_init(&ap->scsi_scan_mutex);
5618  INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5619  INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5620  INIT_LIST_HEAD(&ap->eh_done_q);
5621  init_waitqueue_head(&ap->eh_wait_q);
5622  init_completion(&ap->park_req_pending);
5623  init_timer_deferrable(&ap->fastdrain_timer);
5624  ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5625  ap->fastdrain_timer.data = (unsigned long)ap;
5626 
5627  ap->cbl = ATA_CBL_NONE;
5628 
5629  ata_link_init(ap, &ap->link, 0);
5630 
5631 #ifdef ATA_IRQ_TRAP
5632  ap->stats.unhandled_irq = 1;
5633  ap->stats.idle_irq = 1;
5634 #endif
5635  ata_sff_port_init(ap);
5636 
5637  return ap;
5638 }
5639 
5640 static void ata_host_release(struct device *gendev, void *res)
5641 {
5642  struct ata_host *host = dev_get_drvdata(gendev);
5643  int i;
5644 
5645  for (i = 0; i < host->n_ports; i++) {
5646  struct ata_port *ap = host->ports[i];
5647 
5648  if (!ap)
5649  continue;
5650 
5651  if (ap->scsi_host)
5652  scsi_host_put(ap->scsi_host);
5653 
5654  kfree(ap->pmp_link);
5655  kfree(ap->slave_link);
5656  kfree(ap);
5657  host->ports[i] = NULL;
5658  }
5659 
5660  dev_set_drvdata(gendev, NULL);
5661 }
5662 
5683 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5684 {
5685  struct ata_host *host;
5686  size_t sz;
5687  int i;
5688 
5689  DPRINTK("ENTER\n");
5690 
5691  if (!devres_open_group(dev, NULL, GFP_KERNEL))
5692  return NULL;
5693 
5694  /* alloc a container for our list of ATA ports (buses) */
5695  sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5697  host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5698  if (!host)
5699  goto err_out;
5700 
5701  devres_add(dev, host);
5702  dev_set_drvdata(dev, host);
5703 
5704  spin_lock_init(&host->lock);
5705  mutex_init(&host->eh_mutex);
5706  host->dev = dev;
5707  host->n_ports = max_ports;
5708 
5709  /* allocate ports bound to this host */
5710  for (i = 0; i < max_ports; i++) {
5711  struct ata_port *ap;
5712 
5713  ap = ata_port_alloc(host);
5714  if (!ap)
5715  goto err_out;
5716 
5717  ap->port_no = i;
5718  host->ports[i] = ap;
5719  }
5720 
5721  devres_remove_group(dev, NULL);
5722  return host;
5723 
5724  err_out:
5725  devres_release_group(dev, NULL);
5726  return NULL;
5727 }
5728 
5745 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5746  const struct ata_port_info * const * ppi,
5747  int n_ports)
5748 {
5749  const struct ata_port_info *pi;
5750  struct ata_host *host;
5751  int i, j;
5752 
5753  host = ata_host_alloc(dev, n_ports);
5754  if (!host)
5755  return NULL;
5756 
5757  for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5758  struct ata_port *ap = host->ports[i];
5759 
5760  if (ppi[j])
5761  pi = ppi[j++];
5762 
5763  ap->pio_mask = pi->pio_mask;
5764  ap->mwdma_mask = pi->mwdma_mask;
5765  ap->udma_mask = pi->udma_mask;
5766  ap->flags |= pi->flags;
5767  ap->link.flags |= pi->link_flags;
5768  ap->ops = pi->port_ops;
5769 
5770  if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5771  host->ops = pi->port_ops;
5772  }
5773 
5774  return host;
5775 }
5776 
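Note the carry-forward in the loop above: once ppi[j] turns NULL, the last non-NULL ata_port_info keeps being applied, so a single entry can describe every port. A hedged sketch of a caller; my_port_info and its masks are made-up values, the ops and flag names are real libata symbols:

#include <linux/libata.h>

static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &sata_port_ops,
};

/* both ports inherit my_port_info because entry 1 is NULL */
static const struct ata_port_info *ppi[] = { &my_port_info, NULL };

/* host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); */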
5823 int ata_slave_link_init(struct ata_port *ap)
5824 {
5825  struct ata_link *link;
5826 
5827  WARN_ON(ap->slave_link);
5828  WARN_ON(ap->flags & ATA_FLAG_PMP);
5829 
5830  link = kzalloc(sizeof(*link), GFP_KERNEL);
5831  if (!link)
5832  return -ENOMEM;
5833 
5834  ata_link_init(ap, link, 1);
5835  ap->slave_link = link;
5836  return 0;
5837 }
5838 
5839 static void ata_host_stop(struct device *gendev, void *res)
5840 {
5841  struct ata_host *host = dev_get_drvdata(gendev);
5842  int i;
5843 
5844  WARN_ON(!(host->flags & ATA_HOST_STARTED));
5845 
5846  for (i = 0; i < host->n_ports; i++) {
5847  struct ata_port *ap = host->ports[i];
5848 
5849  if (ap->ops->port_stop)
5850  ap->ops->port_stop(ap);
5851  }
5852 
5853  if (host->ops->host_stop)
5854  host->ops->host_stop(host);
5855 }
5856 
5877 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5878 {
5879  static DEFINE_SPINLOCK(lock);
5880  const struct ata_port_operations *cur;
5881  void **begin = (void **)ops;
5882  void **end = (void **)&ops->inherits;
5883  void **pp;
5884 
5885  if (!ops || !ops->inherits)
5886  return;
5887 
5888  spin_lock(&lock);
5889 
5890  for (cur = ops->inherits; cur; cur = cur->inherits) {
5891  void **inherit = (void **)cur;
5892 
5893  for (pp = begin; pp < end; pp++, inherit++)
5894  if (!*pp)
5895  *pp = *inherit;
5896  }
5897 
5898  for (pp = begin; pp < end; pp++)
5899  if (IS_ERR(*pp))
5900  *pp = NULL;
5901 
5902  ops->inherits = NULL;
5903 
5904  spin_unlock(&lock);
5905 }
5906 
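This finalization is what makes the .inherits chains in libata drivers work: NULL slots are filled from the ancestor chain, while ATA_OP_NULL (an ERR_PTR sentinel) survives inheritance and is then cleared by the IS_ERR() pass, leaving a deliberate hole. A sketch, with my_hardreset as a hypothetical override:

#include <linux/libata.h>

static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);	/* hypothetical */

static struct ata_port_operations my_port_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= my_hardreset,	/* overrides sata_std_hardreset */
	.softreset	= ATA_OP_NULL,	/* blocks any inherited softreset */
};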
5923 int ata_host_start(struct ata_host *host)
5924 {
5925  int have_stop = 0;
5926  void *start_dr = NULL;
5927  int i, rc;
5928 
5929  if (host->flags & ATA_HOST_STARTED)
5930  return 0;
5931 
5932  ata_finalize_port_ops(host->ops);
5933 
5934  for (i = 0; i < host->n_ports; i++) {
5935  struct ata_port *ap = host->ports[i];
5936 
5937  ata_finalize_port_ops(ap->ops);
5938 
5939  if (!host->ops && !ata_port_is_dummy(ap))
5940  host->ops = ap->ops;
5941 
5942  if (ap->ops->port_stop)
5943  have_stop = 1;
5944  }
5945 
5946  if (host->ops->host_stop)
5947  have_stop = 1;
5948 
5949  if (have_stop) {
5950  start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5951  if (!start_dr)
5952  return -ENOMEM;
5953  }
5954 
5955  for (i = 0; i < host->n_ports; i++) {
5956  struct ata_port *ap = host->ports[i];
5957 
5958  if (ap->ops->port_start) {
5959  rc = ap->ops->port_start(ap);
5960  if (rc) {
5961  if (rc != -ENODEV)
5962  dev_err(host->dev,
5963  "failed to start port %d (errno=%d)\n",
5964  i, rc);
5965  goto err_out;
5966  }
5967  }
5968  ata_eh_freeze_port(ap);
5969  }
5970 
5971  if (start_dr)
5972  devres_add(host->dev, start_dr);
5973  host->flags |= ATA_HOST_STARTED;
5974  return 0;
5975 
5976  err_out:
5977  while (--i >= 0) {
5978  struct ata_port *ap = host->ports[i];
5979 
5980  if (ap->ops->port_stop)
5981  ap->ops->port_stop(ap);
5982  }
5983  devres_free(start_dr);
5984  return rc;
5985 }
5986 
5994 void ata_host_init(struct ata_host *host, struct device *dev,
5995  struct ata_port_operations *ops)
5996 {
5997  spin_lock_init(&host->lock);
5998  mutex_init(&host->eh_mutex);
5999  host->dev = dev;
6000  host->ops = ops;
6001 }
6002 
6003 void __ata_port_probe(struct ata_port *ap)
6004 {
6005  struct ata_eh_info *ehi = &ap->link.eh_info;
6006  unsigned long flags;
6007 
6008  /* kick EH for boot probing */
6009  spin_lock_irqsave(ap->lock, flags);
6010 
6011  ehi->probe_mask |= ATA_ALL_DEVICES;
6012  ehi->action |= ATA_EH_RESET;
6013  ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6014 
6015  ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6016  ap->pflags |= ATA_PFLAG_LOADING;
6017  ata_port_schedule_eh(ap);
6018 
6019  spin_unlock_irqrestore(ap->lock, flags);
6020 }
6021 
6022 int ata_port_probe(struct ata_port *ap)
6023 {
6024  int rc = 0;
6025 
6026  if (ap->ops->error_handler) {
6027  __ata_port_probe(ap);
6028  ata_port_wait_eh(ap);
6029  } else {
6030  DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6031  rc = ata_bus_probe(ap);
6032  DPRINTK("ata%u: bus probe end\n", ap->print_id);
6033  }
6034  return rc;
6035 }
6036 
6037 
6038 static void async_port_probe(void *data, async_cookie_t cookie)
6039 {
6040  struct ata_port *ap = data;
6041 
6042  /*
6043  * If we're not allowed to scan this host in parallel,
6044  * we need to wait until all previous scans have completed
6045  * before going further.
6046  * Jeff Garzik says this is only within a controller, so we
6047  * don't need to wait for port 0, only for later ports.
6048  */
6049  if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6050  async_synchronize_cookie(cookie);
6051 
6052  (void)ata_port_probe(ap);
6053 
6054  /* in order to keep device order, we need to synchronize at this point */
6055  async_synchronize_cookie(cookie);
6056 
6057  ata_scsi_scan_host(ap, 1);
6058 }
6059 
6076 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6077 {
6078  int i, rc;
6079 
6080  /* host must have been started */
6081  if (!(host->flags & ATA_HOST_STARTED)) {
6082  dev_err(host->dev, "BUG: trying to register unstarted host\n");
6083  WARN_ON(1);
6084  return -EINVAL;
6085  }
6086 
6087  /* Blow away unused ports. This happens when LLD can't
6088  * determine the exact number of ports to allocate at
6089  * allocation time.
6090  */
6091  for (i = host->n_ports; host->ports[i]; i++)
6092  kfree(host->ports[i]);
6093 
6094  /* give ports names and add SCSI hosts */
6095  for (i = 0; i < host->n_ports; i++)
6096  host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6097 
6098 
6099  /* Create associated sysfs transport objects */
6100  for (i = 0; i < host->n_ports; i++) {
6101  rc = ata_tport_add(host->dev, host->ports[i]);
6102  if (rc) {
6103  goto err_tadd;
6104  }
6105  }
6106 
6107  rc = ata_scsi_add_hosts(host, sht);
6108  if (rc)
6109  goto err_tadd;
6110 
6111  /* set cable, sata_spd_limit and report */
6112  for (i = 0; i < host->n_ports; i++) {
6113  struct ata_port *ap = host->ports[i];
6114  unsigned long xfer_mask;
6115 
6116  /* set SATA cable type if still unset */
6117  if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6118  ap->cbl = ATA_CBL_SATA;
6119 
6120  /* init sata_spd_limit to the current value */
6121  sata_link_init_spd(&ap->link);
6122  if (ap->slave_link)
6123  sata_link_init_spd(ap->slave_link);
6124 
6125  /* print per-port info to dmesg */
6126  xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6127  ap->udma_mask);
6128 
6129  if (!ata_port_is_dummy(ap)) {
6130  ata_port_info(ap, "%cATA max %s %s\n",
6131  (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6132  ata_mode_string(xfer_mask),
6133  ap->link.eh_info.desc);
6134  ata_ehi_clear_desc(&ap->link.eh_info);
6135  } else
6136  ata_port_info(ap, "DUMMY\n");
6137  }
6138 
6139  /* perform each probe asynchronously */
6140  for (i = 0; i < host->n_ports; i++) {
6141  struct ata_port *ap = host->ports[i];
6142  async_schedule(async_port_probe, ap);
6143  }
6144 
6145  return 0;
6146 
6147  err_tadd:
6148  while (--i >= 0) {
6149  ata_tport_delete(host->ports[i]);
6150  }
6151  return rc;
6152 
6153 }
6154 
6178 int ata_host_activate(struct ata_host *host, int irq,
6179  irq_handler_t irq_handler, unsigned long irq_flags,
6180  struct scsi_host_template *sht)
6181 {
6182  int i, rc;
6183 
6184  rc = ata_host_start(host);
6185  if (rc)
6186  return rc;
6187 
6188  /* Special case for polling mode */
6189  if (!irq) {
6190  WARN_ON(irq_handler);
6191  return ata_host_register(host, sht);
6192  }
6193 
6194  rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6195  dev_driver_string(host->dev), host);
6196  if (rc)
6197  return rc;
6198 
6199  for (i = 0; i < host->n_ports; i++)
6200  ata_port_desc(host->ports[i], "irq %d", irq);
6201 
6202  rc = ata_host_register(host, sht);
6203  /* if failed, just free the IRQ and leave ports alone */
6204  if (rc)
6205  devm_free_irq(host->dev, irq, host);
6206 
6207  return rc;
6208 }
6209 
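Taken together with ata_host_alloc_pinfo() above, this gives the canonical tail of an LLD's probe routine. A hedged sketch; my_interrupt, my_sht and the two-port ppi array are assumptions carried over from the earlier sketches:

#include <linux/interrupt.h>
#include <linux/libata.h>

static irqreturn_t my_interrupt(int irq, void *dev_instance);	/* hypothetical */

static int my_probe_tail(struct device *dev, int irq,
			 struct scsi_host_template *my_sht,
			 const struct ata_port_info * const *ppi)
{
	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);

	if (!host)
		return -ENOMEM;

	/* starts ports, grabs the IRQ (0 would mean polling), registers */
	return ata_host_activate(host, irq, my_interrupt, IRQF_SHARED, my_sht);
}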
6221 static void ata_port_detach(struct ata_port *ap)
6222 {
6223  unsigned long flags;
6224 
6225  if (!ap->ops->error_handler)
6226  goto skip_eh;
6227 
6228  /* tell EH we're leaving & flush EH */
6229  spin_lock_irqsave(ap->lock, flags);
6230  ap->pflags |= ATA_PFLAG_UNLOADING;
6231  ata_port_schedule_eh(ap);
6232  spin_unlock_irqrestore(ap->lock, flags);
6233 
6234  /* wait till EH commits suicide */
6235  ata_port_wait_eh(ap);
6236 
6237  /* it better be dead now */
6238  WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6239 
6239 
6240  cancel_delayed_work_sync(&ap->hotplug_task);
6241 
6242  skip_eh:
6243  if (ap->pmp_link) {
6244  int i;
6245  for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6246  ata_tlink_delete(&ap->pmp_link[i]);
6247  }
6248  ata_tport_delete(ap);
6249 
6250  /* remove the associated SCSI host */
6251  scsi_remove_host(ap->scsi_host);
6252 }
6253 
6263 void ata_host_detach(struct ata_host *host)
6264 {
6265  int i;
6266 
6267  for (i = 0; i < host->n_ports; i++)
6268  ata_port_detach(host->ports[i]);
6269 
6270  /* the host is dead now, dissociate ACPI */
6271  ata_acpi_dissociate(host);
6272 }
6273 
6274 #ifdef CONFIG_PCI
6275 
6287 void ata_pci_remove_one(struct pci_dev *pdev)
6288 {
6289  struct device *dev = &pdev->dev;
6290  struct ata_host *host = dev_get_drvdata(dev);
6291 
6292  ata_host_detach(host);
6293 }
6294 
6295 /* move to PCI subsystem */
6296 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6297 {
6298  unsigned long tmp = 0;
6299 
6300  switch (bits->width) {
6301  case 1: {
6302  u8 tmp8 = 0;
6303  pci_read_config_byte(pdev, bits->reg, &tmp8);
6304  tmp = tmp8;
6305  break;
6306  }
6307  case 2: {
6308  u16 tmp16 = 0;
6309  pci_read_config_word(pdev, bits->reg, &tmp16);
6310  tmp = tmp16;
6311  break;
6312  }
6313  case 4: {
6314  u32 tmp32 = 0;
6315  pci_read_config_dword(pdev, bits->reg, &tmp32);
6316  tmp = tmp32;
6317  break;
6318  }
6319 
6320  default:
6321  return -EINVAL;
6322  }
6323 
6324  tmp &= bits->mask;
6325 
6326  return (tmp == bits->val) ? 1 : 0;
6327 }
6328 
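pci_test_config_bits() returns 1 when the masked config-space field equals .val, 0 when it does not, and -EINVAL for an unsupported width. A sketch with a made-up "port enable" bit; register 0x41 and the mask are illustrative, not from any real chip:

#include <linux/libata.h>
#include <linux/pci.h>

static const struct pci_bits my_port_enable_bits = {
	.reg	= 0x41,	/* hypothetical config-space register */
	.width	= 1,	/* byte-wide access */
	.mask	= 0x08,
	.val	= 0x08,	/* bit 3 set => port enabled */
};

/*
 * if (!pci_test_config_bits(pdev, &my_port_enable_bits))
 *	return -ENOENT;		port disabled in firmware
 */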
6329 #ifdef CONFIG_PM
6330 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6331 {
6332  pci_save_state(pdev);
6333  pci_disable_device(pdev);
6334 
6335  if (mesg.event & PM_EVENT_SLEEP)
6336  pci_set_power_state(pdev, PCI_D3hot);
6337 }
6338 
6339 int ata_pci_device_do_resume(struct pci_dev *pdev)
6340 {
6341  int rc;
6342 
6343  pci_set_power_state(pdev, PCI_D0);
6344  pci_restore_state(pdev);
6345 
6346  rc = pcim_enable_device(pdev);
6347  if (rc) {
6348  dev_err(&pdev->dev,
6349  "failed to enable device after resume (%d)\n", rc);
6350  return rc;
6351  }
6352 
6353  pci_set_master(pdev);
6354  return 0;
6355 }
6356 
6357 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6358 {
6359  struct ata_host *host = dev_get_drvdata(&pdev->dev);
6360  int rc = 0;
6361 
6362  rc = ata_host_suspend(host, mesg);
6363  if (rc)
6364  return rc;
6365 
6366  ata_pci_device_do_suspend(pdev, mesg);
6367 
6368  return 0;
6369 }
6370 
6371 int ata_pci_device_resume(struct pci_dev *pdev)
6372 {
6373  struct ata_host *host = dev_get_drvdata(&pdev->dev);
6374  int rc;
6375 
6376  rc = ata_pci_device_do_resume(pdev);
6377  if (rc == 0)
6378  ata_host_resume(host);
6379  return rc;
6380 }
6381 #endif /* CONFIG_PM */
6382 
6383 #endif /* CONFIG_PCI */
6384 
6385 static int __init ata_parse_force_one(char **cur,
6386  struct ata_force_ent *force_ent,
6387  const char **reason)
6388 {
6389  /* FIXME: Currently, there's no way to tag init const data and
6390  * using __initdata causes build failure on some versions of
6391  * gcc. Once __initdataconst is implemented, add const to the
6392  * following structure.
6393  */
6394  static struct ata_force_param force_tbl[] __initdata = {
6395  { "40c", .cbl = ATA_CBL_PATA40 },
6396  { "80c", .cbl = ATA_CBL_PATA80 },
6397  { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6398  { "unk", .cbl = ATA_CBL_PATA_UNK },
6399  { "ign", .cbl = ATA_CBL_PATA_IGN },
6400  { "sata", .cbl = ATA_CBL_SATA },
6401  { "1.5Gbps", .spd_limit = 1 },
6402  { "3.0Gbps", .spd_limit = 2 },
6403  { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6404  { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6405  { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6406  { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6407  { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6408  { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6409  { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6410  { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6411  { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6412  { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6413  { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6414  { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6415  { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6416  { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6417  { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6418  { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6419  { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6420  { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6421  { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6422  { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6423  { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6424  { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6425  { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6426  { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6427  { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6428  { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6429  { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6430  { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6431  { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6432  { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6433  { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6434  { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6435  { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6436  { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6437  { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6438  { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6439  { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6440  { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6441  { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6442  { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6443  { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6444  };
6445  char *start = *cur, *p = *cur;
6446  char *id, *val, *endp;
6447  const struct ata_force_param *match_fp = NULL;
6448  int nr_matches = 0, i;
6449 
6450  /* find where this param ends and update *cur */
6451  while (*p != '\0' && *p != ',')
6452  p++;
6453 
6454  if (*p == '\0')
6455  *cur = p;
6456  else
6457  *cur = p + 1;
6458 
6459  *p = '\0';
6460 
6461  /* parse */
6462  p = strchr(start, ':');
6463  if (!p) {
6464  val = strstrip(start);
6465  goto parse_val;
6466  }
6467  *p = '\0';
6468 
6469  id = strstrip(start);
6470  val = strstrip(p + 1);
6471 
6472  /* parse id */
6473  p = strchr(id, '.');
6474  if (p) {
6475  *p++ = '\0';
6476  force_ent->device = simple_strtoul(p, &endp, 10);
6477  if (p == endp || *endp != '\0') {
6478  *reason = "invalid device";
6479  return -EINVAL;
6480  }
6481  }
6482 
6483  force_ent->port = simple_strtoul(id, &endp, 10);
6484  if (id == endp || *endp != '\0') {
6485  *reason = "invalid port/link";
6486  return -EINVAL;
6487  }
6488 
6489  parse_val:
6490  /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6491  for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6492  const struct ata_force_param *fp = &force_tbl[i];
6493 
6494  if (strncasecmp(val, fp->name, strlen(val)))
6495  continue;
6496 
6497  nr_matches++;
6498  match_fp = fp;
6499 
6500  if (strcasecmp(val, fp->name) == 0) {
6501  nr_matches = 1;
6502  break;
6503  }
6504  }
6505 
6506  if (!nr_matches) {
6507  *reason = "unknown value";
6508  return -EINVAL;
6509  }
6510  if (nr_matches > 1) {
6511  *reason = "ambiguous value";
6512  return -EINVAL;
6513  }
6514 
6515  force_ent->param = *match_fp;
6516 
6517  return 0;
6518 }
6519 
6520 static void __init ata_parse_force_param(void)
6521 {
6522  int idx = 0, size = 1;
6523  int last_port = -1, last_device = -1;
6524  char *p, *cur, *next;
6525 
6526  /* calculate maximum number of params and allocate force_tbl */
6527  for (p = ata_force_param_buf; *p; p++)
6528  if (*p == ',')
6529  size++;
6530 
6531  ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6532  if (!ata_force_tbl) {
6533  printk(KERN_WARNING "ata: failed to extend force table, "
6534  "libata.force ignored\n");
6535  return;
6536  }
6537 
6538  /* parse and populate the table */
6539  for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6540  const char *reason = "";
6541  struct ata_force_ent te = { .port = -1, .device = -1 };
6542 
6543  next = cur;
6544  if (ata_parse_force_one(&next, &te, &reason)) {
6545  printk(KERN_WARNING "ata: failed to parse force "
6546  "parameter \"%s\" (%s)\n",
6547  cur, reason);
6548  continue;
6549  }
6550 
6551  if (te.port == -1) {
6552  te.port = last_port;
6553  te.device = last_device;
6554  }
6555 
6556  ata_force_tbl[idx++] = te;
6557 
6558  last_port = te.port;
6559  last_device = te.device;
6560  }
6561 
6562  ata_force_tbl_size = idx;
6563 }
6564 
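These entries back the libata.force= boot parameter, whose grammar is a comma-separated list of [PORT[.DEVICE]:]VALUE items (see Documentation/kernel-parameters.txt). Two illustrative command lines:

  libata.force=1.5Gbps                 limit every link to 1.5 Gbps
  libata.force=3:noncq,1.00:udma/33    disable NCQ on port 3; cap port 1, device 0 at UDMA/33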
6565 static int __init ata_init(void)
6566 {
6567  int rc;
6568 
6569  ata_parse_force_param();
6570 
6571  ata_acpi_register();
6572 
6573  rc = ata_sff_init();
6574  if (rc) {
6575  kfree(ata_force_tbl);
6576  return rc;
6577  }
6578 
6579  libata_transport_init();
6580  ata_scsi_transport_template = ata_attach_transport();
6581  if (!ata_scsi_transport_template) {
6582  ata_sff_exit();
6583  rc = -ENOMEM;
6584  goto err_out;
6585  }
6586 
6587  printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6588  return 0;
6589 
6590 err_out:
6591  return rc;
6592 }
6593 
6594 static void __exit ata_exit(void)
6595 {
6596  ata_release_transport(ata_scsi_transport_template);
6597  libata_transport_exit();
6598  ata_sff_exit();
6599  ata_acpi_unregister();
6600  kfree(ata_force_tbl);
6601 }
6602 
6603 subsys_initcall(ata_init);
6604 module_exit(ata_exit);
6605 
6606 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6607 
6608 int ata_ratelimit(void)
6609 {
6610  return __ratelimit(&ratelimit);
6611 }
6612 
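With DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1) this allows at most one message per 200 ms. Typical (sketched) use around a noisy warning; the message text is illustrative:

	if (ata_ratelimit())
		ata_port_warn(ap, "spurious interrupt\n");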
6627 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6628 {
6629  bool owns_eh = ap && ap->host->eh_owner == current;
6630 
6631  if (owns_eh)
6632  ata_eh_release(ap);
6633 
6634  msleep(msecs);
6635 
6636  if (owns_eh)
6637  ata_eh_acquire(ap);
6638 }
6639 
6664 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6665  unsigned long interval, unsigned long timeout)
6666 {
6667  unsigned long deadline;
6668  u32 tmp;
6669 
6670  tmp = ioread32(reg);
6671 
6672  /* Calculate timeout _after_ the first read to make sure
6673  * preceding writes reach the controller before starting to
6674  * eat away the timeout.
6675  */
6676  deadline = ata_deadline(jiffies, timeout);
6677 
6678  while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6679  ata_msleep(ap, interval);
6680  tmp = ioread32(reg);
6681  }
6682 
6683  return tmp;
6684 }
6685 
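ata_wait_register() polls while (value & mask) == val, so waiting for a bit to clear means passing the bit as both mask and val. A sketch with a hypothetical status register and busy bit:

#include <linux/libata.h>

#define MY_BUSY	0x80	/* hypothetical busy bit */

static int my_wait_idle(struct ata_port *ap, void __iomem *mmio_status)
{
	/* poll every 10 ms, give up after 1000 ms */
	u32 status = ata_wait_register(ap, mmio_status, MY_BUSY, MY_BUSY,
				       10, 1000);

	return (status & MY_BUSY) ? -EBUSY : 0;
}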
6686 /*
6687  * Dummy port_ops
6688  */
6689 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6690 {
6691  return AC_ERR_SYSTEM;
6692 }
6693 
6694 static void ata_dummy_error_handler(struct ata_port *ap)
6695 {
6696  /* truly dummy */
6697 }
6698 
6699 struct ata_port_operations ata_dummy_port_ops = {
6700  .qc_prep = ata_noop_qc_prep,
6701  .qc_issue = ata_dummy_qc_issue,
6702  .error_handler = ata_dummy_error_handler,
6703  .sched_eh = ata_std_sched_eh,
6704  .end_eh = ata_std_end_eh,
6705 };
6706 
6707 const struct ata_port_info ata_dummy_port_info = {
6708  .port_ops = &ata_dummy_port_ops,
6709 };
6710 
6711 /*
6712  * Utility print functions
6713  */
6714 int ata_port_printk(const struct ata_port *ap, const char *level,
6715  const char *fmt, ...)
6716 {
6717  struct va_format vaf;
6718  va_list args;
6719  int r;
6720 
6721  va_start(args, fmt);
6722 
6723  vaf.fmt = fmt;
6724  vaf.va = &args;
6725 
6726  r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6727 
6728  va_end(args);
6729 
6730  return r;
6731 }
6732 EXPORT_SYMBOL(ata_port_printk);
6733 
6734 int ata_link_printk(const struct ata_link *link, const char *level,
6735  const char *fmt, ...)
6736 {
6737  struct va_format vaf;
6738  va_list args;
6739  int r;
6740 
6741  va_start(args, fmt);
6742 
6743  vaf.fmt = fmt;
6744  vaf.va = &args;
6745 
6746  if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6747  r = printk("%sata%u.%02u: %pV",
6748  level, link->ap->print_id, link->pmp, &vaf);
6749  else
6750  r = printk("%sata%u: %pV",
6751  level, link->ap->print_id, &vaf);
6752 
6753  va_end(args);
6754 
6755  return r;
6756 }
6757 EXPORT_SYMBOL(ata_link_printk);
6758 
6759 int ata_dev_printk(const struct ata_device *dev, const char *level,
6760  const char *fmt, ...)
6761 {
6762  struct va_format vaf;
6763  va_list args;
6764  int r;
6765 
6766  va_start(args, fmt);
6767 
6768  vaf.fmt = fmt;
6769  vaf.va = &args;
6770 
6771  r = printk("%sata%u.%02u: %pV",
6772  level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6773  &vaf);
6774 
6775  va_end(args);
6776 
6777  return r;
6778 }
6779 EXPORT_SYMBOL(ata_dev_printk);
6780 
6781 void ata_print_version(const struct device *dev, const char *version)
6782 {
6783  dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6784 }
6785 EXPORT_SYMBOL(ata_print_version);
6786 
6787 /*
6788  * libata is essentially a library of internal helper functions for
6789  * low-level ATA host controller drivers. As such, the API/ABI is
6790  * likely to change as new drivers are added and updated.
6791  * Do not depend on ABI/API stability.
6792  */
6793 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6794 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6795 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6796 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6797 EXPORT_SYMBOL_GPL(sata_port_ops);
6798 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6799 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6854 #ifdef CONFIG_PM
6855 EXPORT_SYMBOL_GPL(ata_host_suspend);
6856 EXPORT_SYMBOL_GPL(ata_host_resume);
6857 #endif /* CONFIG_PM */
6862 
6868 
6869 #ifdef CONFIG_PCI
6870 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6871 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6872 #ifdef CONFIG_PM
6873 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6874 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6875 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6876 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6877 #endif /* CONFIG_PM */
6878 #endif /* CONFIG_PCI */
6879 
6884 #ifdef CONFIG_PCI
6885 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6886 #endif /* CONFIG_PCI */
6899