11 #include <linux/bitops.h>
14 #include <linux/kernel.h>
15 #include <linux/export.h>
17 #include <linux/slab.h>
19 #include <linux/pci.h>
24 #include <scsi/scsi.h>
36 #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
37 #define SG_MEMPOOL_SIZE 2
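/* Each scsi_host_sg_pool pairs a slab cache with a mempool of scatterlist
 * chunks of one power-of-two size; SG_MEMPOOL_SIZE is the number of
 * elements kept in reserve per mempool. */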
46 #define SP(x) { x, "sgpool-" __stringify(x) }
47 #if (SCSI_MAX_SG_SEGMENTS < 32)
48 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
53 #if (SCSI_MAX_SG_SEGMENTS > 32)
55 #if (SCSI_MAX_SG_SEGMENTS > 64)
57 #if (SCSI_MAX_SG_SEGMENTS > 128)
59 #if (SCSI_MAX_SG_SEGMENTS > 256)
60 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
93 #define SCSI_QUEUE_DELAY 3
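/* Delay, in milliseconds, used with blk_delay_queue() when a request must
 * be retried later because the device, target or host is still busy. */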
107 static void scsi_unprep_request(struct request *req)
138 printk("Inserting command %p into mlqueue\n", cmd));
182 spin_unlock_irqrestore(q->queue_lock, flags);
206 __scsi_queue_insert(cmd, reason, 1);
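/* scsi_queue_insert() passes 1 as the final argument so that
 * __scsi_queue_insert() marks the device unbusy before requeueing. */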
226 unsigned char *sense, int timeout, int retries, int flags,
242 memcpy(req->cmd, cmd, req->cmd_len);
246 req->timeout = timeout;
247 req->cmd_type = REQ_TYPE_BLOCK_PC;
261 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
262 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
265 *resid = req->resid_len;
288 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
289 sense, timeout, retries, 0, resid);
309 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
312 scsi_set_resid(cmd, 0);
327 if (unlikely(scsi_host_in_recovery(shost) &&
333 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
343 static void scsi_single_lun_run(struct scsi_device *current_sdev)
352 spin_unlock_irqrestore(shost->host_lock, flags);
367 if (sdev == current_sdev)
372 spin_unlock_irqrestore(shost->host_lock, flags);
379 spin_unlock_irqrestore(shost->host_lock, flags);
382 static inline int scsi_device_is_busy(struct scsi_device *sdev)
390 static inline int scsi_target_is_busy(struct scsi_target *starget)
397 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
427 scsi_single_lun_run(sdev);
443 if (scsi_host_is_busy(shost))
463 spin_unlock_irqrestore(shost->host_lock, flags);
511 scsi_unprep_request(req);
513 spin_unlock_irqrestore(q->queue_lock, flags);
543 static void __scsi_release_buffers(struct scsi_cmnd *, int);
568 int bytes, int requeue)
589 scsi_requeue_command(q, cmd);
600 __scsi_release_buffers(cmd, 0);
605 static inline unsigned int scsi_sgtable_index(unsigned short nents)
614 index = get_count_order(nents) - 3;
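/* get_count_order() rounds nents up to a power of two, so requests with
 * up to 8 segments use sgpool-8 (index 0), 9-16 use sgpool-16 (index 1),
 * and so on up to SCSI_MAX_SG_SEGMENTS. */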
619 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
623 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
631 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
643 gfp_mask, scsi_sg_alloc);
656 static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
659 if (cmd->sdb.table.nents)
660 scsi_free_sgtable(&cmd->sdb);
664 if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
666 cmd->request->next_rq->special;
667 scsi_free_sgtable(bidi_sdb);
672 if (scsi_prot_sg_count(cmd))
695 __scsi_release_buffers(cmd, 1);
699 static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
708 set_host_byte(cmd, DID_OK);
712 set_host_byte(cmd, DID_OK);
767 int sense_deferred = 0;
768 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
769 ACTION_DELAYED_RETRY} action;
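/* scsi_io_completion() selects one of these actions: ACTION_FAIL completes
 * the request with an error, ACTION_REPREP unpreps and requeues it,
 * ACTION_RETRY requeues it immediately, and ACTION_DELAYED_RETRY requeues
 * it after a delay. */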
775 sense_deferred = scsi_sense_is_deferred(&sshdr);
778 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
780 if (sense_valid && req->sense) {
789 req->sense_len = len;
792 error = __scsi_error_from_host_byte(cmd, result);
797 req->errors = cmd->result;
799 req->resid_len = scsi_get_resid(cmd);
801 if (scsi_bidi_cmnd(cmd)) {
806 req->next_rq->resid_len = scsi_in(cmd)->resid;
825 blk_rq_sectors(req), good_bytes));
838 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
852 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
855 error = __scsi_error_from_host_byte(cmd, result);
863 } else if (sense_valid && !sense_deferred) {
866 if (cmd->device->removable) {
871 description = "Media Changed";
891 if ((cmd->device->use_10_for_rw &&
892 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
896 cmd->device->use_10_for_rw = 0;
898 } else if (sshdr.asc == 0x10) {
899 description = "Host Data Integrity Failure";
903 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
904 switch (cmd->cmnd[0]) {
906 description = "Discard failure";
910 if (cmd->cmnd[1] & 0x8)
911 description = "Discard failure";
914 "Write same failure";
917 description = "Invalid command failure";
927 if (sshdr.asc == 0x10) {
928 description = "Target Data Integrity Failure";
936 if (sshdr.asc == 0x04) {
937 switch (sshdr.ascq) {
946 action = ACTION_DELAYED_RETRY;
949 description = "Device not ready";
954 description = "Device not ready";
963 description = "Unhandled sense code";
968 description = "Unhandled error code";
986 scsi_requeue_command(q, cmd);
995 scsi_requeue_command(q, cmd);
1001 case ACTION_DELAYED_RETRY:
1016 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1018 return BLKPREP_DEFER;
1030 sdb->length = blk_rq_bytes(req);
1049 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
1053 if (blk_bidi_rq(rq)) {
1057 error = BLKPREP_DEFER;
1061 rq->next_rq->special = bidi_sdb;
1062 error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
1067 if (blk_integrity_rq(rq)) {
1074 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1075 error = BLKPREP_DEFER;
1080 prot_sdb->table.sgl);
1103 if (!req->special) {
1113 cmd->tag = req->tag;
1116 cmd->cmnd = req->cmd;
1127 if (ret != BLKPREP_OK)
1130 cmd = scsi_get_cmd_from_req(sdev, req);
1132 return BLKPREP_DEFER;
1143 BUG_ON(!req->nr_phys_segments);
1149 BUG_ON(blk_rq_bytes(req));
1156 if (!blk_rq_bytes(req))
1158 else if (rq_data_dir(req) == WRITE)
1179 if (ret != BLKPREP_OK)
1185 if (ret != BLKPREP_OK)
1192 BUG_ON(!req->nr_phys_segments);
1194 cmd = scsi_get_cmd_from_req(sdev, req);
1196 return BLKPREP_DEFER;
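/* Prep-path return codes: BLKPREP_OK lets the request be queued,
 * BLKPREP_DEFER retries preparation later (e.g. on allocation failure),
 * and BLKPREP_KILL fails the request with an I/O error. */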
1205 int ret = BLKPREP_OK;
1221 "rejecting I/O to offline device\n");
1230 "rejecting I/O to dead device\n");
1240 ret = BLKPREP_DEFER;
1269 req->special = NULL;
1292 int ret = BLKPREP_KILL;
1294 if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1306 static inline int scsi_dev_queue_ready(struct request_queue *q,
1316 "unblocking device at zero depth\n"));
1322 if (scsi_device_is_busy(sdev))
1335 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1353 "unblocking target at zero depth\n"));
1358 if (scsi_target_is_busy(starget)) {
1373 static inline int scsi_host_queue_ready(struct request_queue *q,
1377 if (scsi_host_in_recovery(shost))
1385 printk("scsi%d unblocking host at zero depth\n",
1391 if (scsi_host_is_busy(shost)) {
1421 if (blk_queue_dead(q))
1432 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1455 scsi_init_cmd_errh(cmd);
1475 static void scsi_softirq_done(struct request *rq)
1491 "timing out command, waited %lus\n",
1496 scsi_log_completion(cmd, disposition);
1498 switch (disposition) {
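/* disposition is presumably what scsi_decide_disposition() returned:
 * SUCCESS finishes the command, NEEDS_RETRY and ADD_TO_MLQUEUE requeue
 * it, and anything else is handed to the SCSI error handler. */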
1549 if (!req || !scsi_dev_queue_ready(q, sdev))
1552 if (unlikely(!scsi_device_online(sdev))) {
1554 "rejecting I/O to offline device\n");
1555 scsi_kill_request(req, q);
1567 spin_unlock(q->queue_lock);
1571 "please mail a stack trace to "
1587 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1594 if (!scsi_target_queue_ready(shost, sdev))
1597 if (!scsi_host_queue_ready(q, shost, sdev))
1613 scsi_init_cmd_errh(cmd);
1619 spin_lock_irq(q->queue_lock);
1637 spin_lock_irq(q->queue_lock);
1646 spin_unlock_irq(q->queue_lock);
1648 spin_lock_irq(q->queue_lock);
1653 struct device *host_dev;
1654 u64 bounce_limit = 0xffffffff;
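/* Bounce limit defaults to 32 bits; ISA-DMA-only hosts appear to be capped
 * at BLK_BOUNCE_ISA, hosts whose bus DMA is not physical (e.g. behind an
 * IOMMU) get BLK_BOUNCE_ANY, otherwise the host device's dma_mask is used. */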
1657 return BLK_BOUNCE_ISA;
1663 return BLK_BOUNCE_ANY;
1665 host_dev = scsi_get_device(shost);
1666 if (host_dev && host_dev->dma_mask)
1667 bounce_limit = *host_dev->dma_mask;
1669 return bounce_limit;
1674 request_fn_proc *request_fn)
1689 if (scsi_host_prot_dma(shost)) {
1705 q->limits.cluster = 0;
1789 if (!scsi_sdb_cache) {
1866 unsigned char cmd[10];
1867 unsigned char *real_buffer;
1870 memset(cmd, 0, sizeof(cmd));
1871 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
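/* MODE SELECT CDB byte 1: bit 4 (0x10) is the PF (page format) flag and
 * bit 0 (0x01) is SP (save pages). */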
1879 memcpy(real_buffer + 8, buffer, len);
1885 real_buffer[4] = data->longlba ? 0x01 : 0;
1901 memcpy(real_buffer + 4, buffer, len);
1914 sshdr, timeout, retries, NULL);
1942 unsigned char cmd[12];
1948 memset(data, 0, sizeof(*data));
1950 cmd[1] = dbd & 0x18;
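/* Masking with 0x18 keeps only the caller-supplied DBD (0x08) and
 * LLBAA (0x10) bits for MODE SENSE CDB byte 1. */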
1960 if (use_10_for_ms) {
1979 sshdr, timeout, retries, NULL);
1986 if (use_10_for_ms && !scsi_status_is_good(result) &&
1988 if (scsi_sense_valid(sshdr)) {
1990 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2000 if(scsi_status_is_good(result)) {
2001 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2002 (modepage == 6 || modepage == 8))) {
2010 } else if(use_10_for_ms) {
2011 data->length = buffer[0]*256 + buffer[1] + 2;
2014 data->longlba = buffer[4] & 0x01;
2018 data->length = buffer[0] + 1;
2052 if (!sshdr_external)
2055 sshdr = sshdr_external;
2060 timeout, retries, NULL);
2061 if (sdev->removable && scsi_sense_valid(sshdr) &&
2064 } while (scsi_sense_valid(sshdr) &&
2067 if (!sshdr_external)
2086 if (state == oldstate)
2189 "Illegal state transition %s->%s\n",
2211 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2241 unsigned long flags;
2245 spin_unlock_irqrestore(&sdev->list_lock, flags);
2253 scsi_evt_emit(sdev, evt);
2268 unsigned long flags;
2283 spin_unlock_irqrestore(&sdev->list_lock, flags);
2302 INIT_LIST_HEAD(&evt->node);
2437 unsigned long flags;
2455 spin_unlock_irqrestore(q->queue_lock, flags);
2482 unsigned long flags;
2503 spin_unlock_irqrestore(q->queue_lock, flags);
2516 target_block(struct device *dev, void *data)
2536 device_unblock(struct scsi_device *sdev, void *data)
2542 target_unblock(struct device *dev, void *data)
2571 size_t *offset, size_t *len)
2574 size_t sg_len = 0, len_complete = 0;
2581 len_complete = sg_len;
2583 if (sg_len > *offset)
2590 __func__, sg_len, *offset, sg_count);
2596 *offset = *offset - len_complete + sg->offset;
2599 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
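/* scsi_kmap_atomic_sg() walks the scatterlist until the accumulated length
 * covers *offset, then converts the remaining offset into a page index
 * within that entry so the correct page can be mapped. */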