43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
47 #include <linux/list.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
94 static unsigned int ata_dev_init_params(
struct ata_device *
dev,
96 static unsigned int ata_dev_set_xfermode(
struct ata_device *
dev);
98 static unsigned long ata_dev_blacklisted(
const struct ata_device *
dev);
/* Number of valid entries in ata_force_tbl (filled in by ata_parse_force_param()
 * from the libata.force= module parameter). */
static int ata_force_tbl_size;
/* Description for the libata.force= parameter (parsed into ata_force_tbl). */
MODULE_PARM_DESC(force,
	"Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
/* atapi_enabled: when non-zero (default), ATAPI devices are discovered;
 * when zero, ata_dev_configure() warns and ignores ATAPI devices. */
static int atapi_enabled = 1;
MODULE_PARM_DESC(atapi_enabled,
	"Enable discovery of ATAPI devices (0=off, 1=on [default])");
/* atapi_dmadir: enable DMADIR handling for ATAPI bridges (default off);
 * consulted alongside atapi_id_dmadir() during ATAPI device configuration. */
static int atapi_dmadir = 0;
MODULE_PARM_DESC(atapi_dmadir,
	"Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
/* Description for atapi_passthru16: gates building ATA_16 passthru commands
 * for ATAPI devices (checked before the passthru path). */
MODULE_PARM_DESC(atapi_passthru16,
	"Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
/* ata_ignore_hpa: when set, ata_hpa_resize() unlocks the Host Protected Area
 * and exposes the device's full native capacity instead of the BIOS limit. */
static int ata_ignore_hpa;
MODULE_PARM_DESC(ignore_hpa,
	"Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
/* ata_probe_timeout: when non-zero, overrides the internal-command timeout
 * (applied as ata_probe_timeout * 1000, i.e. presumably seconds -> ms). */
static int ata_probe_timeout;
/* Description for the noacpi parameter: disables ATA ACPI method use. */
MODULE_PARM_DESC(noacpi,
	"Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
/* Description for the allow_tpm parameter: permits TPM commands to reach
 * devices (off by default for safety). */
MODULE_PARM_DESC(allow_tpm,
	"Permit the use of TPM commands (0=off [default], 1=on)");
164 MODULE_PARM_DESC(atapi_an,
"Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
172 static bool ata_sstatus_online(
u32 sstatus)
174 return (sstatus & 0xf) == 0x3;
200 if (sata_pmp_attached(ap))
208 if (link == &ap->
link)
211 if (sata_pmp_attached(ap))
263 dev = link->
device + ata_link_max_devices(link) - 1;
272 if (++dev < link->
device + ata_link_max_devices(link))
277 if (--dev >= link->
device)
284 !ata_dev_enabled(dev))
331 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
362 static void ata_force_link_limits(
struct ata_link *
link)
364 bool did_spd =
false;
365 int linkno = link->
pmp;
368 if (ata_is_host_link(link))
371 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
374 if (fe->
port != -1 && fe->
port != link->
ap->print_id)
381 if (!did_spd && fe->
param.spd_limit) {
389 if (fe->
param.lflags) {
392 "FORCE: link flag 0x%x forced -> 0x%x\n",
412 int alt_devno =
devno;
416 if (ata_is_host_link(dev->
link))
419 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
421 unsigned long pio_mask, mwdma_mask, udma_mask;
423 if (fe->
port != -1 && fe->
port != dev->
link->ap->print_id)
430 if (!fe->
param.xfer_mask)
434 &pio_mask, &mwdma_mask, &udma_mask);
437 else if (mwdma_mask) {
463 static void ata_force_horkage(
struct ata_device *dev)
466 int alt_devno =
devno;
470 if (ata_is_host_link(dev->
link))
473 for (i = 0; i < ata_force_tbl_size; i++) {
476 if (fe->
port != -1 && fe->
port != dev->
link->ap->print_id)
525 if (atapi_passthru16)
606 static const u8 ata_rw_cmds[] = {
669 cmd = ata_rw_cmds[index + fua + lba48 +
write];
702 block |= (tf->
device & 0xf) << 24;
704 block |= tf->
lbah << 16;
705 block |= tf->
lbam << 8;
716 "device reported invalid CHS sector 0\n");
751 tf->
flags |= tf_flags;
755 if (!lba_48_ok(block, n_block))
766 tf->
nsect = tag << 3;
770 tf->
hob_lbah = (block >> 40) & 0xff;
771 tf->
hob_lbam = (block >> 32) & 0xff;
772 tf->
hob_lbal = (block >> 24) & 0xff;
773 tf->
lbah = (block >> 16) & 0xff;
774 tf->
lbam = (block >> 8) & 0xff;
775 tf->
lbal = block & 0xff;
783 if (lba_28_ok(block, n_block)) {
785 tf->
device |= (block >> 24) & 0xf;
786 }
else if (lba_48_ok(block, n_block)) {
795 tf->
hob_lbah = (block >> 40) & 0xff;
796 tf->
hob_lbam = (block >> 32) & 0xff;
797 tf->
hob_lbal = (block >> 24) & 0xff;
802 if (
unlikely(ata_rwcmd_protocol(tf, dev) < 0))
805 tf->
nsect = n_block & 0xff;
807 tf->
lbah = (block >> 16) & 0xff;
808 tf->
lbam = (block >> 8) & 0xff;
809 tf->
lbal = block & 0xff;
817 if (!lba_28_ok(block, n_block))
820 if (
unlikely(ata_rwcmd_protocol(tf, dev) < 0))
825 cyl = track / dev->
heads;
826 head = track % dev->
heads;
829 DPRINTK(
"block %u track %u cyl %u head %u sect %u\n",
830 (
u32)block, track, cyl, head, sect);
836 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
839 tf->
nsect = n_block & 0xff;
865 unsigned long mwdma_mask,
866 unsigned long udma_mask)
884 unsigned long *mwdma_mask,
unsigned long *udma_mask)
894 static const struct ata_xfer_ent {
919 int highbit = fls(xfer_mask) - 1;
920 const struct ata_xfer_ent *
ent;
922 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
923 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
924 return ent->base + highbit - ent->shift;
942 const struct ata_xfer_ent *
ent;
944 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
945 if (xfer_mode >= ent->base && xfer_mode < ent->
base + ent->bits)
946 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
947 & ~((1 << ent->shift) - 1);
965 const struct ata_xfer_ent *
ent;
967 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
968 if (xfer_mode >= ent->base && xfer_mode < ent->
base + ent->bits)
989 static const char *
const xfer_mode_str[] = {
1013 highbit = fls(xfer_mask) - 1;
1014 if (highbit >= 0 && highbit <
ARRAY_SIZE(xfer_mode_str))
1015 return xfer_mode_str[highbit];
1021 static const char *
const spd_str[] = {
1027 if (spd == 0 || (spd - 1) >=
ARRAY_SIZE(spd_str))
1029 return spd_str[spd - 1];
1070 if ((tf->
lbam == 0) && (tf->
lbah == 0)) {
1071 DPRINTK(
"found ATA device by sig\n");
1075 if ((tf->
lbam == 0x14) && (tf->
lbah == 0xeb)) {
1076 DPRINTK(
"found ATAPI device by sig\n");
1080 if ((tf->
lbam == 0x69) && (tf->
lbah == 0x96)) {
1081 DPRINTK(
"found PMP device by sig\n");
1085 if ((tf->
lbam == 0x3c) && (tf->
lbah == 0xc3)) {
1086 DPRINTK(
"found SEMB device by sig (could be ATA device)\n");
1110 unsigned int ofs,
unsigned int len)
1145 unsigned int ofs,
unsigned int len)
1152 while (p > s && p[-1] ==
' ')
1157 static u64 ata_id_n_sectors(
const u16 *
id)
1160 if (ata_id_has_lba48(
id))
1165 if (ata_id_current_chs_valid(
id))
1181 sectors |= (tf->
lbah & 0xff) << 16;
1182 sectors |= (tf->
lbam & 0xff) << 8;
1183 sectors |= (tf->
lbal & 0xff);
1192 sectors |= (tf->
device & 0x0f) << 24;
1193 sectors |= (tf->
lbah & 0xff) << 16;
1194 sectors |= (tf->
lbam & 0xff) << 8;
1195 sectors |= (tf->
lbal & 0xff);
1212 static int ata_read_native_max_address(
struct ata_device *dev,
u64 *max_sectors)
1214 unsigned int err_mask;
1216 int lba48 = ata_id_has_lba48(dev->
id);
1218 ata_tf_init(dev, &tf);
1235 "failed to read native max address (err_mask=0x%x)\n",
1263 static int ata_set_max_sectors(
struct ata_device *dev,
u64 new_sectors)
1265 unsigned int err_mask;
1267 int lba48 = ata_id_has_lba48(dev->
id);
1271 ata_tf_init(dev, &tf);
1279 tf.
hob_lbal = (new_sectors >> 24) & 0xff;
1280 tf.
hob_lbam = (new_sectors >> 32) & 0xff;
1281 tf.
hob_lbah = (new_sectors >> 40) & 0xff;
1285 tf.
device |= (new_sectors >> 24) & 0xf;
1291 tf.
lbal = (new_sectors >> 0) & 0xff;
1292 tf.
lbam = (new_sectors >> 8) & 0xff;
1293 tf.
lbah = (new_sectors >> 16) & 0xff;
1298 "failed to set max address (err_mask=0x%x)\n",
1320 static int ata_hpa_resize(
struct ata_device *dev)
1336 rc = ata_read_native_max_address(dev, &native_sectors);
1341 if (rc == -
EACCES || !unlock_hpa) {
1343 "HPA support seems broken, skipping HPA handling\n");
1356 if (native_sectors <= sectors || !unlock_hpa) {
1357 if (!print_info || native_sectors == sectors)
1360 if (native_sectors > sectors)
1362 "HPA detected: current %llu, native %llu\n",
1363 (
unsigned long long)sectors,
1364 (
unsigned long long)native_sectors);
1365 else if (native_sectors < sectors)
1367 "native sectors (%llu) is smaller than sectors (%llu)\n",
1368 (
unsigned long long)native_sectors,
1369 (
unsigned long long)sectors);
1374 rc = ata_set_max_sectors(dev, native_sectors);
1378 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1379 (
unsigned long long)sectors,
1380 (
unsigned long long)native_sectors);
1390 "failed to re-read IDENTIFY data after HPA resizing\n");
1395 u64 new_sectors = ata_id_n_sectors(dev->
id);
1397 "HPA unlocked: %llu -> %llu, native %llu\n",
1398 (
unsigned long long)sectors,
1399 (
unsigned long long)new_sectors,
1400 (
unsigned long long)native_sectors);
1417 static inline void ata_dump_id(
const u16 *
id)
1462 unsigned long pio_mask, mwdma_mask, udma_mask;
1476 pio_mask = (2 <<
mode) - 1;
1490 if (ata_id_is_cfa(
id)) {
1498 pio_mask |= (1 << 5);
1500 pio_mask |= (1 << 6);
1502 mwdma_mask |= (1 << 3);
1504 mwdma_mask |= (1 << 4);
1546 unsigned int n_elem,
unsigned long timeout)
1551 int auto_timeout = 0;
1553 unsigned int tag, preempted_tag;
1554 u32 preempted_sactive, preempted_qc_active;
1555 int preempted_nr_active_links;
1557 unsigned long flags;
1565 spin_unlock_irqrestore(ap->
lock, flags);
1576 if (ap->
ops->error_handler)
1583 qc = __ata_qc_from_tag(ap, tag);
1592 preempted_sactive = link->
sactive;
1622 spin_unlock_irqrestore(ap->
lock, flags);
1625 if (ata_probe_timeout)
1626 timeout = ata_probe_timeout * 1000;
1633 if (ap->
ops->error_handler)
1638 if (ap->
ops->error_handler)
1654 if (ap->
ops->error_handler)
1664 spin_unlock_irqrestore(ap->
lock, flags);
1668 if (ap->
ops->post_internal_cmd)
1669 ap->
ops->post_internal_cmd(qc);
1691 link->
sactive = preempted_sactive;
1695 spin_unlock_irqrestore(ap->
lock, flags);
1724 int dma_dir,
void *
buf,
unsigned int buflen,
1725 unsigned long timeout)
1728 unsigned int n_elem = 0;
1759 ata_tf_init(dev, &tf);
1789 if (ata_id_is_cfa(adev->
id)
1808 static u32 ata_pio_mask_no_iordy(
const struct ata_device *adev)
1866 unsigned int class = *p_class;
1868 unsigned int err_mask = 0;
1871 int may_fallback = 1, tried_spinup = 0;
1878 ata_tf_init(dev, &tf);
1891 reason =
"unsupported class";
1907 if (ap->
ops->read_id)
1908 err_mask = ap->
ops->read_id(dev, &tf,
id);
1914 ata_dev_dbg(dev,
"NODEV after polling detection\n");
1920 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1947 "both IDENTIFYs aborted, assuming NODEV\n");
1952 reason =
"I/O error";
1958 "class=%d may_fallback=%d tried_spinup=%d\n",
1959 class, may_fallback, tried_spinup);
1973 reason =
"device reports invalid type";
1981 "host indicates ignore ATA devices, ignored\n");
1989 if (!tried_spinup && (
id[2] == 0x37c8 ||
id[2] == 0x738c)) {
1997 if (err_mask &&
id[2] != 0x738c) {
1999 reason =
"SPINUP failed";
2006 if (
id[2] == 0x37c8)
2023 err_mask = ata_dev_init_params(dev,
id[3],
id[6]);
2026 reason =
"INIT_DEV_PARAMS failed";
2033 flags &= ~ATA_READID_POSTRESET;
2044 ata_dev_warn(dev,
"failed to IDENTIFY (%s, err_mask=0x%x)\n",
2049 static int ata_do_link_spd_horkage(
struct ata_device *dev)
2062 target_limit = (1 <<
target) - 1;
2075 ata_dev_info(dev,
"applying link speed limit horkage to %s\n",
2082 static inline u8 ata_dev_knobble(
struct ata_device *dev)
2092 static int ata_dev_config_ncq(
struct ata_device *dev,
2093 char *
desc,
size_t desc_sz)
2097 unsigned int err_mask;
2105 snprintf(desc, desc_sz,
"NCQ (not used)");
2120 "failed to enable AA (error_mask=0x%x)\n",
2130 if (hdepth >= ddepth)
2131 snprintf(desc, desc_sz,
"NCQ (depth %d)%s", ddepth, aa_desc);
2133 snprintf(desc, desc_sz,
"NCQ (depth %d/%d)%s", hdepth,
2156 const u16 *
id = dev->
id;
2157 unsigned long xfer_mask;
2158 unsigned int err_mask;
2165 ata_dev_info(dev,
"%s: ENTER/EXIT -- nodev\n", __func__);
2173 dev->
horkage |= ata_dev_blacklisted(dev);
2174 ata_force_horkage(dev);
2184 ata_dev_warn(dev,
"WARNING: ATAPI is %s, device ignored\n",
2185 atapi_enabled ?
"not supported with this driver"
2191 rc = ata_do_link_spd_horkage(dev);
2201 rc = ata_hpa_resize(dev);
2208 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2209 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2211 id[49],
id[82],
id[83],
id[84],
2212 id[85],
id[86],
id[87],
id[88]);
2243 if (ata_id_is_cfa(
id)) {
2247 "supports DRM functions and may not be fully accessible\n");
2250 snprintf(revbuf, 7,
"ATA-%d", ata_id_major_version(
id));
2252 if (ata_id_has_tpm(
id))
2254 "supports DRM functions and may not be fully accessible\n");
2260 if ((dev->
id[47] >> 8) == 0x80 && (dev->
id[59] & 0x100)) {
2261 unsigned int max = dev->
id[47] & 0xff;
2262 unsigned int cnt = dev->
id[59] & 0xff;
2270 const char *lba_desc;
2275 if (ata_id_has_lba48(
id)) {
2280 ata_id_has_flush_ext(
id))
2285 rc = ata_dev_config_ncq(dev, ncq_desc,
sizeof(ncq_desc));
2292 revbuf, modelbuf, fwrevbuf,
2295 "%llu sectors, multi %u: %s %s\n",
2307 if (ata_id_current_chs_valid(
id)) {
2310 dev->
heads =
id[55];
2317 revbuf, modelbuf, fwrevbuf,
2320 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2343 "failed to get Identify Device Data, Emask 0x%x\n",
2352 const char *cdb_intr_string =
"";
2353 const char *atapi_an_string =
"";
2354 const char *dma_dir_string =
"";
2357 rc = atapi_cdb_len(
id);
2373 (!sata_pmp_attached(ap) ||
2380 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2384 atapi_an_string =
", ATAPI AN";
2390 cdb_intr_string =
", CDB intr";
2393 if (atapi_dmadir || atapi_id_dmadir(dev->
id)) {
2395 dma_dir_string =
", DMADIR";
2404 "ATAPI: %s, %s, max %s%s%s%s\n",
2407 cdb_intr_string, atapi_an_string,
2418 if (ata_dev_knobble(dev)) {
2426 (atapi_command_packet_set(
id) ==
TYPE_TAPE)) {
2435 if (ap->
ops->dev_config)
2436 ap->
ops->dev_config(dev);
2447 "Drive reports diagnostics failure. This may indicate a drive\n");
2449 "fault or invalid emulation. Contact drive vendor for information.\n");
2454 ata_dev_warn(dev,
"WARNING: device requires firmware update to be fully functional\n");
2455 ata_dev_warn(dev,
" contact the vendor or visit http://ata.wiki.kernel.org\n");
2569 if (ap->
ops->set_piomode)
2570 ap->
ops->set_piomode(ap, dev);
2574 ap->
ops->phy_reset(ap);
2590 if (tries[dev->
devno])
2593 if (!ata_dev_enabled(dev))
2603 if (ap->
ops->cable_detect)
2604 ap->
cbl = ap->
ops->cable_detect(ap);
2612 if (ata_id_is_sata(dev->
id))
2637 tries[dev->
devno]--;
2642 tries[dev->
devno] = 0;
2649 if (tries[dev->
devno] == 1) {
2658 if (!tries[dev->
devno])
2673 static void sata_print_link_status(
struct ata_link *link)
2675 u32 sstatus, scontrol,
tmp;
2682 tmp = (sstatus >> 4) & 0xf;
2683 ata_link_info(link,
"SATA link up %s (SStatus %X SControl %X)\n",
2686 ata_link_info(link,
"SATA link down (SStatus %X SControl %X)\n",
2703 if (!ata_dev_enabled(pair))
2740 if (rc == 0 && ata_sstatus_online(sstatus))
2741 spd = (sstatus >> 4) & 0xf;
2750 bit = fls(mask) - 1;
2751 mask &= ~(1 <<
bit);
2757 mask &= (1 << (spd - 1)) - 1;
2766 if (mask & ((1 << spd_limit) - 1))
2767 mask &= (1 << spd_limit) - 1;
2769 bit =
ffs(mask) - 1;
2782 static int __sata_set_spd_needed(
struct ata_link *link,
u32 *scontrol)
2784 struct ata_link *host_link = &link->
ap->link;
2793 if (!ata_is_host_link(link) && host_link->
sata_spd)
2794 limit &= (1 << host_link->
sata_spd) - 1;
2799 target = fls(limit);
2801 spd = (*scontrol >> 4) & 0xf;
2802 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2822 static int sata_set_spd_needed(
struct ata_link *link)
2829 return __sata_set_spd_needed(link, &scontrol);
2853 if (!__sata_set_spd_needed(link, &scontrol))
2876 {
XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2877 {
XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2878 {
XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2879 {
XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2880 {
XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2881 {
XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2882 {
XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2884 {
XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2885 {
XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2886 {
XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2888 {
XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2889 {
XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2890 {
XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2891 {
XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2892 {
XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2895 {
XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2896 {
XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2897 {
XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2898 {
XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2899 {
XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2900 {
XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2901 {
XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
/* ENOUGH(): ceiling division — smallest number of whole 'unit's covering 'v'. */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
/* EZ(): like ENOUGH() but preserves 0 (an unspecified timing value stays 0). */
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2909 static void ata_timing_quantize(
const struct ata_timing *
t,
struct ata_timing *
q,
int T,
int UT)
2923 struct ata_timing *
m,
unsigned int what)
2938 const struct ata_timing *
t = ata_timing;
2940 while (xfer_mode > t->
mode)
2943 if (xfer_mode == t->
mode)
2946 WARN_ONCE(
true,
"%s: unable to find timing for xfer_mode 0x%x\n",
2947 __func__, xfer_mode);
2953 struct ata_timing *
t,
int T,
int UT)
2955 const u16 *
id = adev->
id;
2956 const struct ata_timing *
s;
2957 struct ata_timing p;
2966 memcpy(t, s,
sizeof(*s));
2974 memset(&p, 0,
sizeof(p));
2992 ata_timing_quantize(t, t, T, UT);
3046 u8 base_mode = 0xff, last_mode = 0xff;
3047 const struct ata_xfer_ent *
ent;
3048 const struct ata_timing *
t;
3050 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3051 if (ent->shift == xfer_shift)
3052 base_mode = ent->base;
3056 unsigned short this_cycle;
3058 switch (xfer_shift) {
3061 this_cycle = t->
cycle;
3064 this_cycle = t->
udma;
3070 if (cycle > this_cycle)
3073 last_mode = t->
mode;
3097 unsigned long orig_mask, xfer_mask;
3098 unsigned long pio_mask, mwdma_mask, udma_mask;
3111 highbit = fls(pio_mask) - 1;
3112 pio_mask &= ~(1 << highbit);
3117 highbit = fls(udma_mask) - 1;
3118 udma_mask &= ~(1 << highbit);
3121 }
else if (mwdma_mask) {
3122 highbit = fls(mwdma_mask) - 1;
3123 mwdma_mask &= ~(1 << highbit);
3146 if (!(xfer_mask &
ATA_MASK_PIO) || xfer_mask == orig_mask)
3151 snprintf(buf,
sizeof(buf),
"%s:%s",
3167 static int ata_dev_set_mode(
struct ata_device *dev)
3172 const char *dev_err_whine =
"";
3173 int ign_dev_err = 0;
3174 unsigned int err_mask = 0;
3182 dev_err_whine =
" (SET_XFERMODE skipped)";
3186 "NOSETXFER but PATA detected - can't "
3187 "skip SETXFER, might malfunction\n");
3188 err_mask = ata_dev_set_xfermode(dev);
3203 if (ata_id_is_cfa(dev->
id))
3207 if (ata_id_major_version(dev->
id) == 0 &&
3220 (dev->
id[63] >> 8) & 1)
3231 dev_err_whine =
" (device error ignored)";
3234 DPRINTK(
"xfer_shift=%u, xfer_mode=0x%x\n",
3244 ata_dev_err(dev,
"failed to set xfermode (err_mask=0x%x)\n", err_mask);
3269 int rc = 0, used_dma = 0, found = 0;
3274 unsigned int mode_mask;
3279 else if (ata_id_is_cfa(dev->
id))
3282 ata_dev_xfermask(dev);
3283 ata_force_xfermask(dev);
3287 if (libata_dma_mask & mode_mask)
3297 if (ata_dma_enabled(dev))
3313 if (ap->
ops->set_piomode)
3314 ap->
ops->set_piomode(ap, dev);
3319 if (!ata_dma_enabled(dev))
3324 if (ap->
ops->set_dmamode)
3325 ap->
ops->set_dmamode(ap, dev);
3330 rc = ata_dev_set_mode(dev);
3339 ap->
host->simplex_claimed = ap;
3343 *r_failed_dev =
dev;
3368 int (*check_ready)(
struct ata_link *link))
3371 unsigned long nodev_deadline;
3387 nodev_deadline = deadline;
3393 ready = tmp = check_ready(link);
3423 (deadline - now > 3 *
HZ)) {
3425 "link is slow to respond, please be patient "
3426 "(ready=%d)\n", tmp);
3449 int (*check_ready)(
struct ata_link *link))
3479 unsigned long deadline)
3481 unsigned long interval = params[0];
3482 unsigned long duration = params[1];
3483 unsigned long last_jiffies,
t;
3487 t = ata_deadline(
jiffies, params[2]);
3509 ata_deadline(last_jiffies, duration)))
3541 unsigned long deadline)
3544 u32 scontrol, serror;
3556 scontrol = (scontrol & 0x0f0) | 0x300;
3569 }
while ((scontrol & 0xf0f) != 0x300 && --tries);
3571 if ((scontrol & 0xf0f) != 0x300) {
3572 ata_link_warn(link,
"failed to resume link (SControl %X)\n",
3578 ata_link_warn(link,
"link resume succeeded after %d retries\n",
3588 return rc != -
EINVAL ? rc : 0;
3613 bool woken_up =
false;
3624 scontrol |= (0x7 << 8);
3627 scontrol |= (0x4 << 12);
3633 scontrol &= ~(0x1 << 8);
3634 scontrol |= (0x6 << 8);
3639 scontrol &= ~(0x7 << 8);
3643 scontrol |= (0x1 << 2);
3684 const unsigned long *
timing = sata_ehc_deb_timing(ehc);
3697 "failed to resume link for reset (errno=%d)\n",
3733 unsigned long deadline,
3744 if (sata_set_spd_needed(link)) {
3753 scontrol = (scontrol & 0x0f0) | 0x304;
3765 scontrol = (scontrol & 0x0f0) | 0x301;
3787 if (sata_pmp_supported(link->
ap) && ata_is_host_link(link)) {
3794 unsigned long pmp_deadline;
3796 pmp_deadline = ata_deadline(
jiffies,
3799 pmp_deadline = deadline;
3810 if (rc && rc != -
EAGAIN) {
3814 ata_link_err(link,
"COMRESET failed (errno=%d)\n", rc);
3835 unsigned long deadline)
3837 const unsigned long *timing = sata_ehc_deb_timing(&link->
eh_context);
3869 sata_print_link_status(link);
3890 static int ata_dev_same_device(
struct ata_device *dev,
unsigned int new_class,
3893 const u16 *old_id = dev->
id;
3897 if (dev->
class != new_class) {
3899 dev->
class, new_class);
3908 if (
strcmp(model[0], model[1])) {
3909 ata_dev_info(dev,
"model number mismatch '%s' != '%s'\n",
3910 model[0], model[1]);
3914 if (
strcmp(serial[0], serial[1])) {
3915 ata_dev_info(dev,
"serial number mismatch '%s' != '%s'\n",
3916 serial[0], serial[1]);
3939 unsigned int class = dev->class;
3940 u16 *
id = (
void *)dev->
link->ap->sector_buf;
3949 if (!ata_dev_same_device(dev,
class,
id))
3972 unsigned int readid_flags)
3978 if (!ata_dev_enabled(dev))
3982 if (ata_class_enabled(new_class) &&
3987 dev->
class, new_class);
4009 (
unsigned long long)n_sectors,
4020 "new n_sectors matches native, probably "
4021 "late HPA unlock, n_sectors updated\n");
4033 dev->
n_sectors < n_sectors && n_sectors == n_native_sectors &&
4036 "old n_sectors matches native, probably "
4037 "late HPA lock, will try to unlock HPA\n");
4048 ata_dev_err(dev,
"revalidation failed (errno=%d)\n", rc);
4201 static int glob_match (
const char *
text,
const char *
pattern)
4205 if (*text == *pattern || *pattern ==
'?') {
4210 if (!*text || *pattern !=
'[')
4212 while (*++pattern && *pattern !=
']' && *text != *pattern) {
4213 if (*pattern ==
'-' && *(pattern - 1) !=
'[')
4214 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4219 if (!*pattern || *pattern ==
']')
4221 while (*pattern && *pattern++ !=
']');
4223 }
while (*++text && *pattern);
4226 if (*pattern ==
'*') {
4231 if (glob_match(text, pattern) == 0)
4236 if (!*text && !*pattern)
4241 static unsigned long ata_dev_blacklisted(
const struct ata_device *dev)
4251 if (!glob_match(model_num, ad->
model_num)) {
4254 if (!glob_match(model_rev, ad->
model_rev))
4262 static int ata_dma_blacklisted(
const struct ata_device *dev)
4282 static int ata_is_40wire(
struct ata_device *dev)
4285 return ata_drive_40wire_relaxed(dev->
id);
4286 return ata_drive_40wire(dev->
id);
4302 static int cable_is_40wire(
struct ata_port *ap)
4333 if (!ata_is_40wire(dev))
4352 static void ata_dev_xfermask(
struct ata_device *dev)
4357 unsigned long xfer_mask;
4379 if (ata_dma_blacklisted(dev)) {
4382 "device is on DMA blacklist, disabling DMA\n");
4389 "simplex DMA is claimed by other device, disabling DMA\n");
4393 xfer_mask &= ata_pio_mask_no_iordy(dev);
4395 if (ap->
ops->mode_filter)
4396 xfer_mask = ap->
ops->mode_filter(dev, xfer_mask);
4408 if (cable_is_40wire(ap)) {
4410 "limited to UDMA/33 due to 40-wire cable\n");
4432 static unsigned int ata_dev_set_xfermode(
struct ata_device *dev)
4435 unsigned int err_mask;
4438 DPRINTK(
"set features - xfer mode\n");
4443 ata_tf_init(dev, &tf);
4459 DPRINTK(
"EXIT, err_mask=%x\n", err_mask);
4481 unsigned int err_mask;
4484 DPRINTK(
"set features - SATA features\n");
4486 ata_tf_init(dev, &tf);
4495 DPRINTK(
"EXIT, err_mask=%x\n", err_mask);
4512 static unsigned int ata_dev_init_params(
struct ata_device *dev,
4516 unsigned int err_mask;
4519 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4523 DPRINTK(
"init dev params \n");
4525 ata_tf_init(dev, &tf);
4530 tf.
device |= (heads - 1) & 0x0f;
4539 DPRINTK(
"EXIT, err_mask=%x\n", err_mask);
4594 if (ap->
ops->check_atapi_dma)
4595 return ap->
ops->check_atapi_dma(qc);
4646 unsigned int n_elem)
4669 unsigned int n_elem;
4677 DPRINTK(
"%d sg elements mapped\n", n_elem);
4702 for (i = 0; i < buf_words; i++)
4727 qc = __ata_qc_from_tag(ap, i);
4750 qc = ata_qc_new(ap);
4782 if (
likely(ata_tag_valid(tag))) {
4796 link = qc->
dev->link;
4832 ap->
ops->qc_fill_rtf(qc);
4839 if (ata_is_nodata(qc->
tf.protocol))
4880 if (ap->
ops->error_handler) {
4916 switch (qc->
tf.command) {
4935 ata_verify_xfer(qc);
4977 if (
unlikely(done_mask & qc_active)) {
4978 ata_port_err(ap,
"illegal qc_active transition (%08x->%08x)\n",
4985 unsigned int tag =
__ffs(done_mask);
4987 qc = ata_qc_from_tag(ap, tag);
4992 done_mask &= ~(1 <<
tag);
5014 u8 prot = qc->
tf.protocol;
5022 if (ata_is_ncq(prot)) {
5046 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5048 if (ata_sg_setup(qc))
5059 ap->
ops->qc_prep(qc);
5109 if (ata_is_host_link(link)) {
5111 return link->
ap->ops->scr_read(link, reg, val);
5136 if (ata_is_host_link(link)) {
5138 return link->
ap->ops->scr_write(link, reg, val);
5162 if (ata_is_host_link(link)) {
5166 rc = link->
ap->ops->scr_write(link, reg, val);
5168 rc = link->
ap->ops->scr_read(link, reg, &val);
5196 ata_sstatus_online(sstatus))
5220 !ata_sstatus_online(sstatus))
5279 unsigned int action,
unsigned int ehi_flags,
5283 unsigned long flags;
5310 link->
eh_info.flags |= ehi_flags;
5315 spin_unlock_irqrestore(ap->
lock, flags);
5342 rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
5350 return __ata_port_suspend_common(ap, mesg,
NULL);
5353 static int ata_port_suspend(
struct device *dev)
5355 if (pm_runtime_suspended(dev))
5361 static int ata_port_do_freeze(
struct device *dev)
5363 if (pm_runtime_suspended(dev))
5364 pm_runtime_resume(dev);
5369 static int ata_port_poweroff(
struct device *dev)
5371 if (pm_runtime_suspended(dev))
5377 static int __ata_port_resume_common(
struct ata_port *ap,
int *async)
5386 static int ata_port_resume_common(
struct device *dev)
5390 return __ata_port_resume_common(ap,
NULL);
5393 static int ata_port_resume(
struct device *dev)
5397 rc = ata_port_resume_common(dev);
5399 pm_runtime_disable(dev);
5400 pm_runtime_set_active(dev);
5407 static int ata_port_runtime_idle(
struct device *dev)
5409 return pm_runtime_suspend(dev);
5412 static const struct dev_pm_ops ata_port_pm_ops = {
5414 .resume = ata_port_resume,
5415 .freeze = ata_port_do_freeze,
5416 .thaw = ata_port_resume,
5417 .poweroff = ata_port_poweroff,
5418 .restore = ata_port_resume,
5420 .runtime_suspend = ata_port_suspend,
5421 .runtime_resume = ata_port_resume_common,
5422 .runtime_idle = ata_port_runtime_idle,
5430 int ata_sas_port_async_suspend(
struct ata_port *ap,
int *async)
5432 return __ata_port_suspend_common(ap,
PMSG_SUSPEND, async);
5436 int ata_sas_port_async_resume(
struct ata_port *ap,
int *async)
5438 return __ata_port_resume_common(ap, async);
5452 host->
dev->power.power_state = mesg;
5462 void ata_host_resume(
struct ata_host *host)
5471 .pm = &ata_port_pm_ops,
5488 unsigned long flags;
5501 spin_unlock_irqrestore(ap->
lock, flags);
5540 #ifdef CONFIG_ATA_ACPI
5573 ata_force_link_limits(link);
5608 #if defined(ATA_VERBOSE_DEBUG)
5611 #elif defined(ATA_DEBUG)
5632 ap->
stats.unhandled_irq = 1;
5633 ap->
stats.idle_irq = 1;
5640 static void ata_host_release(
struct device *gendev,
void *
res)
5645 for (i = 0; i < host->
n_ports; i++) {
5695 sz =
sizeof(
struct ata_host) + (max_ports + 1) *
sizeof(
void *);
5710 for (i = 0; i < max_ports; i++) {
5757 for (i = 0, j = 0, pi =
NULL; i < host->
n_ports; i++) {
5839 static void ata_host_stop(
struct device *gendev,
void *res)
5846 for (i = 0; i < host->
n_ports; i++) {
5849 if (ap->
ops->port_stop)
5850 ap->
ops->port_stop(ap);
5853 if (host->
ops->host_stop)
5854 host->
ops->host_stop(host);
5881 void **begin = (
void **)ops;
5891 void **inherit = (
void **)cur;
5893 for (
pp = begin;
pp <
end;
pp++, inherit++)
5926 void *start_dr =
NULL;
5932 ata_finalize_port_ops(host->
ops);
5934 for (i = 0; i < host->
n_ports; i++) {
5937 ata_finalize_port_ops(ap->
ops);
5939 if (!host->
ops && !ata_port_is_dummy(ap))
5942 if (ap->
ops->port_stop)
5946 if (host->
ops->host_stop)
5955 for (i = 0; i < host->
n_ports; i++) {
5958 if (ap->
ops->port_start) {
5959 rc = ap->
ops->port_start(ap);
5963 "failed to start port %d (errno=%d)\n",
5980 if (ap->
ops->port_stop)
5981 ap->
ops->port_stop(ap);
6006 unsigned long flags;
6019 spin_unlock_irqrestore(ap->
lock, flags);
6026 if (ap->
ops->error_handler) {
6082 dev_err(host->
dev,
"BUG: trying to register unstarted host\n");
6095 for (i = 0; i < host->
n_ports; i++)
6100 for (i = 0; i < host->
n_ports; i++) {
6112 for (i = 0; i < host->
n_ports; i++) {
6114 unsigned long xfer_mask;
6129 if (!ata_port_is_dummy(ap)) {
6133 ap->
link.eh_info.desc);
6140 for (i = 0; i < host->
n_ports; i++) {
6194 rc = devm_request_irq(host->
dev, irq, irq_handler, irq_flags,
6199 for (i = 0; i < host->
n_ports; i++)
6221 static void ata_port_detach(
struct ata_port *ap)
6223 unsigned long flags;
6225 if (!ap->
ops->error_handler)
6232 spin_unlock_irqrestore(ap->
lock, flags);
6267 for (i = 0; i < host->
n_ports; i++)
6268 ata_port_detach(host->
ports[i]);
6287 void ata_pci_remove_one(
struct pci_dev *pdev)
6296 int pci_test_config_bits(
struct pci_dev *pdev,
const struct pci_bits *
bits)
6298 unsigned long tmp = 0;
6300 switch (bits->width) {
6303 pci_read_config_byte(pdev, bits->reg, &tmp8);
6309 pci_read_config_word(pdev, bits->reg, &tmp16);
6315 pci_read_config_dword(pdev, bits->reg, &tmp32);
6326 return (tmp == bits->val) ? 1 : 0;
6339 int ata_pci_device_do_resume(
struct pci_dev *pdev)
6349 "failed to enable device after resume (%d)\n", rc);
6362 rc = ata_host_suspend(host, mesg);
6366 ata_pci_device_do_suspend(pdev, mesg);
6371 int ata_pci_device_resume(
struct pci_dev *pdev)
6376 rc = ata_pci_device_do_resume(pdev);
6378 ata_host_resume(host);
6385 static int __init ata_parse_force_one(
char **cur,
6401 {
"1.5Gbps", .spd_limit = 1 },
6402 {
"3.0Gbps", .spd_limit = 2 },
6446 char *
id, *
val, *endp;
6448 int nr_matches = 0,
i;
6451 while (*p !=
'\0' && *p !=
',')
6464 val = strstrip(start);
6469 id = strstrip(start);
6470 val = strstrip(p + 1);
6477 if (p == endp || *endp !=
'\0') {
6478 *reason =
"invalid device";
6484 if (p == endp || *endp !=
'\0') {
6485 *reason =
"invalid port/link";
6491 for (i = 0; i <
ARRAY_SIZE(force_tbl); i++) {
6507 *reason =
"unknown value";
6510 if (nr_matches > 1) {
6511 *reason =
"ambigious value";
6515 force_ent->
param = *match_fp;
6520 static void __init ata_parse_force_param(
void)
6523 int last_port = -1, last_device = -1;
6527 for (p = ata_force_param_buf; *
p; p++)
6531 ata_force_tbl = kzalloc(
sizeof(ata_force_tbl[0]) *
size,
GFP_KERNEL);
6532 if (!ata_force_tbl) {
6534 "libata.force ignored\n");
6539 for (cur = ata_force_param_buf; *cur !=
'\0'; cur =
next) {
6540 const char *reason =
"";
6544 if (ata_parse_force_one(&next, &te, &reason)) {
6546 "parameter \"%s\" (%s)\n",
6551 if (te.
port == -1) {
6552 te.
port = last_port;
6556 ata_force_tbl[idx++] = te;
6558 last_port = te.
port;
6562 ata_force_tbl_size =
idx;
6565 static int __init ata_init(
void)
6569 ata_parse_force_param();
6575 kfree(ata_force_tbl);
6594 static void __exit ata_exit(
void)
6600 kfree(ata_force_tbl);
6629 bool owns_eh = ap && ap->
host->eh_owner ==
current;
6665 unsigned long interval,
unsigned long timeout)
6667 unsigned long deadline;
6676 deadline = ata_deadline(
jiffies, timeout);
6689 static unsigned int ata_dummy_qc_issue(
struct ata_queued_cmd *qc)
6694 static void ata_dummy_error_handler(
struct ata_port *ap)
6701 .qc_issue = ata_dummy_qc_issue,
6702 .error_handler = ata_dummy_error_handler,
6715 const char *
fmt, ...)
6735 const char *
fmt, ...)
6746 if (sata_pmp_attached(link->
ap) || link->
ap->slave_link)
6747 r =
printk(
"%sata%u.%02u: %pV",
6748 level, link->
ap->print_id, link->
pmp, &vaf);
6750 r =
printk(
"%sata%u: %pV",
6751 level, link->
ap->print_id, &vaf);
6760 const char *
fmt, ...)
6771 r =
printk(
"%sata%u.%02u: %pV",
6772 level, dev->
link->ap->print_id, dev->
link->pmp + dev->
devno,
6783 dev_printk(
KERN_DEBUG, dev,
"version %s\n", version);