#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <asm/unaligned.h>

#define ATA_SCSI_RBUF_SIZE	4096
#define RW_RECOVERY_MPAGE		0x1
#define RW_RECOVERY_MPAGE_LEN		12
#define CACHE_MPAGE			0x8
#define CACHE_MPAGE_LEN			20
#define CONTROL_MPAGE			0xa
#define CONTROL_MPAGE_LEN		12
#define ALL_MPAGES			0x3f
#define ALL_SUB_MPAGES			0xff

	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0xff, 0xff,
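/*
 * The *_MPAGE constants above name the MODE SENSE/MODE SELECT pages that
 * libata emulates for ATA devices (read/write error recovery, caching and
 * control), with *_MPAGE_LEN giving each page's length in bytes.  The two
 * initializer lines above appear to be fragments of the default cache and
 * control mode-page templates (def_cache_mpage and def_control_mpage,
 * which ata_msense_caching() and ata_msense_ctl_mode() copy from below).
 */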
static const char *ata_lpm_policy_names[] = {
	struct ata_port *ap = ata_shost_to_port(shost);
	     policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
		const char *name = ata_lpm_policy_names[policy];
	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
	spin_unlock_irqrestore(ap->lock, flags);
	struct ata_port *ap = ata_shost_to_port(shost);
		   ata_scsi_lpm_show, ata_scsi_lpm_store);
	unsigned long flags, now;
	ap = ata_shost_to_port(sdev->host);
	dev = ata_scsi_find_dev(ap, sdev);
	spin_unlock_irq(ap->lock);
	return rc ? rc : snprintf(buf, 20, "%u\n", msecs);

static ssize_t ata_scsi_park_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
	if (rc || input < -2)
	ap = ata_shost_to_port(sdev->host);
	dev = ata_scsi_find_dev(ap, sdev);
	spin_unlock_irqrestore(ap->lock, flags);
	return rc ? rc : len;
	    ata_scsi_park_show, ata_scsi_park_store);
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
	struct ata_port *ap = ata_shost_to_port(shost);
	return ap->ops->em_store(ap, buf, count);
	struct ata_port *ap = ata_shost_to_port(shost);
	return ap->ops->em_show(ap, buf);
		   ata_scsi_em_message_show, ata_scsi_em_message_store);
ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
	struct ata_port *ap = ata_shost_to_port(shost);
		   ata_scsi_em_message_type_show, NULL);
	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
	return ap->ops->sw_activity_show(atadev, buf);
			const char *buf, size_t count)
	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
	rc = ap->ops->sw_activity_store(atadev, val);
		   ata_scsi_activity_store);
static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
	dev = ata_scsi_find_dev(ap, sdev);
	spin_unlock_irqrestore(ap->lock, flags);
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	memset(scsi_cmd, 0, sizeof(scsi_cmd));
		if (argbuf == NULL) {
		scsi_cmd[1] = (4 << 1);
		scsi_cmd[1] = (3 << 1);
	scsi_cmd[4] = args[2];
	scsi_cmd[6] = args[3];
	scsi_cmd[8] = args[1];
	scsi_cmd[6] = args[1];
	scsi_cmd[14] = args[0];
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, (10*HZ), 5, 0, NULL);
	cmd_result &= ~(0xFF<<24);
		    sshdr.asc == 0 && sshdr.ascq == 0)
			cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		if (sensebuf[0] == 0x72 &&
	memset(scsi_cmd, 0, sizeof(scsi_cmd));
	scsi_cmd[1]  = (3 << 1);
	scsi_cmd[4]  = args[1];
	scsi_cmd[6]  = args[2];
	scsi_cmd[8]  = args[3];
	scsi_cmd[10] = args[4];
	scsi_cmd[12] = args[5];
	scsi_cmd[13] = args[6] & 0x4f;
	scsi_cmd[14] = args[0];
				  sensebuf, (10*HZ), 5, 0, NULL);
	cmd_result &= ~(0xFF<<24);
		    sshdr.asc == 0 && sshdr.ascq == 0)
			cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		if (sensebuf[0] == 0x72 &&
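/*
 * Note on the scsi_cmd[] setup in the two ioctl helpers above: the CDB
 * being built is an ATA PASS-THROUGH (16) command, so byte 1 carries the
 * transfer protocol in bits 1-4 ((4 << 1) = PIO Data-In, (3 << 1) =
 * Non-data), byte 4 is the FEATURES register, byte 6 the sector count,
 * bytes 8/10/12 the LBA low/mid/high registers, byte 13 the device
 * register and byte 14 the ATA command opcode.  The sensebuf[0] == 0x72
 * check tests for descriptor-format sense data.
 */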
static int ata_ioc32(struct ata_port *ap)
			int cmd, void __user *arg)
	spin_unlock_irqrestore(ap->lock, flags);
		val = (unsigned long) arg;
		if (val != ata_ioc32(ap))
		spin_unlock_irqrestore(ap->lock, flags);
		return ata_get_identity(ap, scsidev, arg);
	qc->sg = scsi_sglist(cmd);
	qc->n_elem = scsi_sg_count(cmd);
		if (stat & 0x40)	printk("DriveReady ");
		if (stat & 0x20)	printk("DeviceFault ");
		if (stat & 0x10)	printk("SeekComplete ");
		if (stat & 0x08)	printk("DataRequest ");
		if (stat & 0x04)	printk("CorrectedError ");
		if (stat & 0x02)	printk("Index ");
		if (stat & 0x01)	printk("Error ");
			if (err & 0x04)		printk("DriveStatusError ");
			if (err & 0x40)		printk("UncorrectableError ");
			if (err & 0x10)		printk("SectorIdNotFound ");
			if (err & 0x02)		printk("TrackZeroNotFound ");
			if (err & 0x01)		printk("AddrMarkNotFound ");
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
	static const unsigned char sense_table[][4] = {
		{0xFF, 0xFF, 0xFF, 0xFF},
	static const unsigned char stat_table[][4] = {
		{0xFF, 0xFF, 0xFF, 0xFF},
	if (drv_stat & ATA_BUSY) {
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			if ((sense_table[i][0] & drv_err) ==
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
			   "error 0x%02x\n", id, drv_err);
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
		   "status: 0x%02x\n", id, drv_stat);
		   "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
		   id, drv_stat, drv_err, *sk, *asc, *ascq);
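/*
 * ata_to_sense_error() translates an ATA status/error pair into a SCSI
 * sense triple: each row of sense_table[] and stat_table[] is
 * { bit mask, sense key, ASC, ASCQ }, the loops stop at the all-0xFF
 * sentinel row, and the first matching mask supplies *sk/*asc/*ascq;
 * unmatched values fall through to the "error"/"status" warnings above.
 */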
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;
			   &sb[1], &sb[2], &sb[3], verbose);
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;
			   &sb[1], &sb[2], &sb[3], verbose);
		desc[6] = block >> 40;
		desc[7] = block >> 32;
		desc[8] = block >> 24;
		desc[9] = block >> 16;
		desc[10] = block >> 8;
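/*
 * In the sense-generation helpers above, sb points at the sense buffer
 * and desc at the descriptor area that starts 8 bytes in; the shifts
 * store the failed LBA into the descriptor one byte at a time, most
 * significant byte first, as descriptor-format sense data requires.
 */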
static void ata_scsi_sdev_config(struct scsi_device *sdev)

static int atapi_drain_needed(struct request *rq)
	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))

static int ata_scsi_dev_config(struct scsi_device *sdev,
	if (!ata_id_has_unload(dev->id))
		ata_dev_err(dev, "drain buffer allocation failed\n");
			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
	ata_scsi_sdev_config(sdev);
		rc = ata_scsi_dev_config(sdev, dev);
	unsigned long flags;
	if (!ap->ops->error_handler)
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
	spin_unlock_irqrestore(ap->lock, flags);
	kfree(q->dma_drain_buffer);
	q->dma_drain_buffer = NULL;
	q->dma_drain_size = 0;
	unsigned long flags;
	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || !ata_dev_enabled(dev))
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
	spin_unlock_irqrestore(ap->lock, flags);
	queue_depth = min(queue_depth, sdev->host->can_queue);
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
	if (((cdb[4] >> 4) & 0xf) != 0)

static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
	VPRINTK("six-byte command\n");
	lba |= ((u64)(cdb[1] & 0x1f)) << 16;
	lba |= ((u64)cdb[2]) << 8;
	lba |= ((u64)cdb[3]);

static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
	VPRINTK("ten-byte command\n");
	lba |= ((u64)cdb[2]) << 24;
	lba |= ((u64)cdb[3]) << 16;
	lba |= ((u64)cdb[4]) << 8;
	lba |= ((u64)cdb[5]);
	len |= ((u32)cdb[7]) << 8;
	len |= ((u32)cdb[8]);

static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
	VPRINTK("sixteen-byte command\n");
	lba |= ((u64)cdb[2]) << 56;
	lba |= ((u64)cdb[3]) << 48;
	lba |= ((u64)cdb[4]) << 40;
	lba |= ((u64)cdb[5]) << 32;
	lba |= ((u64)cdb[6]) << 24;
	lba |= ((u64)cdb[7]) << 16;
	lba |= ((u64)cdb[8]) << 8;
	lba |= ((u64)cdb[9]);
	len |= ((u32)cdb[10]) << 24;
	len |= ((u32)cdb[11]) << 16;
	len |= ((u32)cdb[12]) << 8;
	len |= ((u32)cdb[13]);
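/*
 * scsi_6_lba_len(), scsi_10_lba_len() and scsi_16_lba_len() pull the
 * big-endian LBA and transfer-length fields out of 6-, 10- and 16-byte
 * CDBs respectively (in the 6-byte form only the low 5 bits of byte 1
 * belong to the LBA).  The 16-byte variant is byte-for-byte equivalent
 * to get_unaligned_be64(&cdb[2]) for the LBA and
 * get_unaligned_be32(&cdb[10]) for the length, using the helpers from
 * <asm/unaligned.h> included above.
 */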
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
		scsi_10_lba_len(cdb, &block, &n_block);
		scsi_16_lba_len(cdb, &block, &n_block);
	if (block >= dev_sectors)
	if ((block + n_block) > dev_sectors)
	if (lba_28_ok(block, n_block)) {
		tf->device |= (block >> 24) & 0xf;
	} else if (lba_48_ok(block, n_block)) {
		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->nsect = n_block & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;
		if (!lba_28_ok(block, n_block))
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
		tf->nsect = n_block & 0xff;
		tf->lbah = cyl >> 8;
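/*
 * Taskfile layout used above: for LBA28 the low 24 bits of the block
 * number go into lbal/lbam/lbah and bits 27:24 into the low nibble of
 * the device register; for LBA48 the hob_lbal/hob_lbam/hob_lbah
 * registers carry bits 31:24, 39:32 and 47:40.  The CHS branch converts
 * the LBA into cylinder/head/sector from the drive geometry and rejects
 * values that do not fit the 16/4/8-bit CHS fields.
 */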
	const u8 *cdb = scmd->cmnd;
	unsigned int tf_flags = 0;
		scsi_10_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))
		scsi_6_lba_len(cdb, &block, &n_block);
		scsi_16_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))
	int need_sense = (qc->err_mask != 0);
	    ((cdb[2] & 0x20) || need_sense)) {
		ata_gen_passthru_sense(qc);
			ata_gen_ata_sense(qc);
	if (need_sense && !ap->ops->error_handler)
	qc = ata_scsi_qc_new(dev, cmd);
		if (unlikely(scsi_bufflen(cmd) < 1)) {
		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
	DPRINTK("EXIT - early finish (good or error)\n");
static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
			       unsigned long *flags)
	return ata_scsi_rbuf;

static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
				     unsigned long *flags)
	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);

	unsigned long flags;
	rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
	rc = actor(args, rbuf);
	ata_scsi_rbuf_put(cmd, rc == 0, &flags);
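/*
 * The simulated (non-translated) commands share one response buffer:
 * ata_scsi_rbuf_get() hands out the global ata_scsi_rbuf
 * (ATA_SCSI_RBUF_SIZE bytes) under ata_scsi_rbuf_lock, the actor
 * callback fills it, and ata_scsi_rbuf_put() copies it back into the
 * command's data buffer (here only when the actor returned 0) before
 * dropping the lock.
 */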
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
	memcpy(rbuf, hdr, sizeof(hdr));
	memcpy(&rbuf[8], "ATA     ", 8);
	if (rbuf[32] == 0 || rbuf[32] == ' ')
		memcpy(&rbuf[32], "n/a ", 4);
	memcpy(rbuf + 59, versions, sizeof(versions));
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
	rbuf[3] = sizeof(pages);
	memcpy(rbuf + 4, pages, sizeof(pages));

static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
	memcpy(rbuf, hdr, sizeof(hdr));

static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
	const int sat_model_serial_desc_len = 68;
	rbuf[num + 3] = sat_model_serial_desc_len;
	memcpy(rbuf + num, "ATA     ", 8);
	if (ata_id_has_wwn(args->id)) {
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
	memset(&tf, 0, sizeof(tf));
	rbuf[2] = (0x238 >> 8);
	rbuf[3] = (0x238 & 0xff);
	memcpy(&rbuf[8], "linux   ", 8);
	memcpy(&rbuf[16], "libata          ", 16);
	memcpy(&rbuf[60], &args->id[0], 512);
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
	min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
	put_unaligned_be16(min_io_sectors, &rbuf[6]);
	if (ata_id_has_trim(args->id)) {

static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
	int form_factor = ata_id_form_factor(args->id);
	int media_rotation_rate = ata_id_rotation_rate(args->id);
	rbuf[4] = media_rotation_rate >> 8;
	rbuf[5] = media_rotation_rate;
	rbuf[7] = form_factor;

static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)

static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
	memset(dest + 2, 0, n - 2);

static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
	modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
	if (changeable || ata_id_wcache_enabled(id))
	if (!changeable && !ata_id_rahead_enabled(id))
		buf[12] |= (1 << 5);
	return sizeof(def_cache_mpage);

static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable)
	modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
	return sizeof(def_control_mpage);

static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
	modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
	return sizeof(def_rw_recovery_mpage);

static int ata_dev_supports_fua(u16 *id)
	if (!ata_id_has_fua(id))
	if (strcmp(model, "Maxtor"))
	if (strcmp(fw, "BANC1G10"))
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
	const u8 sat_blk_desc[] = {
	unsigned int ebd, page_control, six_byte;
	ebd = !(scsicmd[1] & 0x8);
	page_control = scsicmd[2] >> 6;
	switch (page_control) {
		goto saving_not_supp;
		p += 4 + (ebd ? 8 : 0);
		p += 8 + (ebd ? 8 : 0);
	pg = scsicmd[2] & 0x3f;
		p += ata_msense_rw_recovery(p, page_control == 1);
		p += ata_msense_caching(args->id, p, page_control == 1);
		p += ata_msense_ctl_mode(p, page_control == 1);
		p += ata_msense_rw_recovery(p, page_control == 1);
		p += ata_msense_caching(args->id, p, page_control == 1);
		p += ata_msense_ctl_mode(p, page_control == 1);
		rbuf[0] = p - rbuf - 1;
			rbuf[3] = sizeof(sat_blk_desc);
			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
		unsigned int output_len = p - rbuf - 2;
		rbuf[0] = output_len >> 8;
		rbuf[1] = output_len;
			rbuf[7] = sizeof(sat_blk_desc);
			memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
	sector_size = ata_id_logical_sector_size(dev->id);
	log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
	lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
		if (last_lba >= 0xffffffffULL)
			last_lba = 0xffffffff;
		rbuf[0] = last_lba >> (8 * 3);
		rbuf[1] = last_lba >> (8 * 2);
		rbuf[2] = last_lba >> (8 * 1);
		rbuf[4] = sector_size >> (8 * 3);
		rbuf[5] = sector_size >> (8 * 2);
		rbuf[6] = sector_size >> (8 * 1);
		rbuf[0] = last_lba >> (8 * 7);
		rbuf[1] = last_lba >> (8 * 6);
		rbuf[2] = last_lba >> (8 * 5);
		rbuf[3] = last_lba >> (8 * 4);
		rbuf[4] = last_lba >> (8 * 3);
		rbuf[5] = last_lba >> (8 * 2);
		rbuf[6] = last_lba >> (8 * 1);
		rbuf[ 8] = sector_size >> (8 * 3);
		rbuf[ 9] = sector_size >> (8 * 2);
		rbuf[10] = sector_size >> (8 * 1);
		rbuf[13] = log2_per_phys;
		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
		rbuf[15] = lowest_aligned;
		if (ata_id_has_trim(args->id)) {
			if (ata_id_has_zero_after_trim(args->id))
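/*
 * The two byte-stuffing blocks above build the READ CAPACITY(10) and
 * READ CAPACITY(16) payloads by hand: the last LBA (clamped to
 * 0xffffffff in the 10-byte case) and the logical sector size are
 * stored big-endian, and the 16-byte reply additionally reports the
 * logical-per-physical sector exponent in byte 13 and the lowest
 * aligned LBA in the low 14 bits of bytes 14-15.
 */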
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
		ata_gen_passthru_sense(qc);

static inline int ata_pio_use_silly(struct ata_port *ap)
	DPRINTK("ATAPI request sense\n");
#ifdef CONFIG_ATA_SFF
	if (ap->ops->sff_tf_read)
		ap->ops->sff_tf_read(ap, &qc->tf);
	if (ata_pio_use_silly(ap)) {
	unsigned int err_mask = qc->err_mask;
	VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
		ata_gen_passthru_sense(qc);
			qc->dev->sdev->locked = 0;
		atapi_request_sense(qc);
		ata_gen_passthru_sense(qc);
	if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
		unsigned long flags;
		buf = ata_scsi_rbuf_get(cmd, true, &flags);
		ata_scsi_rbuf_put(cmd, true, &flags);
		DPRINTK("direction: write\n");
	ata_qc_set_pc_nbytes(qc);
	nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
	qc->tf.lbam = (nbytes & 0xFF);
	qc->tf.lbah = (nbytes >> 8);
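/*
 * For ATAPI the byte-count limit for each PIO data transfer is passed
 * to the device through the taskfile LBA mid/high registers, which is
 * why nbytes (capped at 63 * 1024 above) is split into qc->tf.lbam
 * (low byte) and qc->tf.lbah (high byte).
 */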
	if (!sata_pmp_attached(ap)) {
		if (likely(devno < ata_link_max_devices(&ap->link)))
		if (likely(devno < ap->nr_pmp_links))
	if (!sata_pmp_attached(ap)) {
		devno = scsidev->id;
	return ata_find_dev(ap, devno);
	struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
	if (unlikely(!dev || !ata_dev_enabled(dev)))

ata_scsi_map_proto(u8 byte1)
	switch ((byte1 & 0x1e) >> 1) {
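/*
 * ata_scsi_map_proto() dispatches on the ATA PASS-THROUGH protocol
 * field, which occupies bits 1-4 of CDB byte 1; (byte1 & 0x1e) >> 1
 * isolates that 4-bit value before the switch.
 */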
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
	const u8 *cdb = scmd->cmnd;
	if (cdb[1] & 0x01) {
	ata_qc_set_pc_nbytes(qc);
	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
	if (is_multi_taskfile(tf)) {

static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
	const u8 *cdb = scmd->cmnd;
	scsi_16_lba_len(cdb, &block, &n_block);
	if (!scsi_sg_count(scmd))
	size = ata_set_lba_range_entries(buf, 512, block, n_block);
	tf->nsect = size / 512;
	ata_qc_set_pc_nbytes(qc);
			       const u8 *buf, int len)
	wce = buf[0] & (1 << 2);
	ata_msense_caching(dev->id, mpage, false);
	mpage[2] &= ~(1 << 2);

static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
	const u8 *cdb = scmd->cmnd;
	unsigned six_byte, pg_len, hdr_len, bd_len;
		len = (cdb[7] << 8) + cdb[8];
	if ((cdb[1] & 0x11) != 0x10)
	if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
		goto invalid_param_len;
		goto invalid_param_len;
		bd_len = (p[6] << 8) + p[7];
		goto invalid_param_len;
	if (bd_len != 0 && bd_len != 8)
		goto invalid_param_len;
		pg_len = (p[2] << 8) | p[3];
			goto invalid_param_len;
			goto invalid_param_len;
		if (ata_mselect_caching(qc, p, pg_len) < 0)
		return ata_scsi_rw_xlat;
		return ata_scsi_write_same_xlat;
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		return ata_scsi_verify_xlat;
		return ata_scsi_pass_thru;
		return ata_scsi_mode_select_xlat;
		return ata_scsi_start_stop_xlat;

static inline void ata_scsi_dump_cdb(struct ata_port *ap,
	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
	u8 scsi_op = scmd->cmnd[0];
		xlat_func = ata_get_xlat_func(dev, scsi_op);
		xlat_func = atapi_xlat;
			xlat_func = ata_get_xlat_func(dev, scsi_op);
		rc = ata_scsi_translate(dev, scmd, xlat_func);
	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
	unsigned long irq_flags;
	ap = ata_shost_to_port(shost);
	ata_scsi_dump_cdb(ap, cmd);
	dev = ata_scsi_find_dev(ap, scsidev);
		rc = __ata_scsi_queuecmd(cmd, dev);
	spin_unlock_irqrestore(ap->lock, irq_flags);
	const u8 *scsicmd = cmd->cmnd;
	switch (scsicmd[0]) {
		ata_scsi_invalid_field(cmd);
			ata_scsi_invalid_field(cmd);
		else if ((scsicmd[1] & 1) == 0)
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
		else switch (scsicmd[2]) {
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
			ata_scsi_invalid_field(cmd);
		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
			ata_scsi_invalid_field(cmd);
		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
		ata_scsi_set_sense(cmd, 0, 0, 0);
		ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		tmp8 = scsicmd[1] & ~(1 << 3);
		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
			ata_scsi_invalid_field(cmd);
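/*
 * ata_scsi_simulate() dispatch, in outline: a plain INQUIRY returns the
 * standard data, while EVPD requests are routed by page code
 * (0x00/0x80/0x83/0x89/0xb0/0xb1/0xb2) to the ata_scsiop_inq_*()
 * generators above; MODE SENSE, READ CAPACITY and REPORT LUNS are
 * emulated through ata_scsi_rbuf_fill(), and unrecognized or malformed
 * CDBs end in ata_scsi_invalid_field().
 */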
	for (i = 0; i < host->n_ports; i++) {
	*(struct ata_port **)&shost->hostdata[0] = ap;
		if (ata_is_host_link(link))
			channel = link->pmp;
		if (!IS_ERR(sdev)) {
			if (dev != last_failed_dev) {
				last_failed_dev = dev;
		"WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");

static void ata_scsi_remove_dev(struct ata_device *dev)
	unsigned long flags;
	spin_unlock_irqrestore(ap->lock, flags);
			 dev_name(&sdev->sdev_gendev));
static void ata_scsi_handle_link_detach(struct ata_link *link)
	unsigned long flags;
	spin_unlock_irqrestore(ap->lock, flags);
		ata_scsi_remove_dev(dev);
	DPRINTK("ENTER/EXIT - unloading\n");
	ata_scsi_handle_link_detach(&ap->link);
		ata_scsi_handle_link_detach(&ap->pmp_link[i]);
			      unsigned int id, unsigned int lun)
	struct ata_port *ap = ata_shost_to_port(shost);
	unsigned long flags;
	if (!ap->ops->error_handler)
	if (!sata_pmp_attached(ap)) {
		struct ata_device *dev = ata_find_dev(ap, devno);
	spin_unlock_irqrestore(ap->lock, flags);
	spin_unlock_irqrestore(ap->lock, flags);
	unsigned long flags;
	spin_unlock_irqrestore(ap->lock, flags);
	spin_unlock_irqrestore(ap->lock, flags);
	if (!ap->ops->error_handler)
	int rc = ap->ops->port_start(ap);
	if (ap->ops->port_stop)
		ap->ops->port_stop(ap);
	ata_scsi_sdev_config(sdev);
	ata_scsi_dev_config(sdev, ap->link.device);
	ata_scsi_dump_cdb(ap, cmd);
	if (likely(ata_dev_enabled(ap->link.device)))
		rc = __ata_scsi_queuecmd(cmd, ap->link.device);