Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
scsi_error.c
Go to the documentation of this file.
1 /*
2  * scsi_error.c Copyright (C) 1997 Eric Youngdale
3  *
4  * SCSI error/timeout handling
5  * Initial versions: Eric Youngdale. Based upon conversations with
6  * Leonard Zubkoff and David Miller at Linux Expo,
7  * ideas originating from all over the place.
8  *
9  * Restructured scsi_unjam_host and associated functions.
10  * September 04, 2002 Mike Anderson ([email protected])
11  *
12  * Forward port of Russell King's ([email protected]) changes and
13  * minor cleanups.
14  * September 30, 2002 Mike Anderson ([email protected])
15  */
16 
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/gfp.h>
20 #include <linux/timer.h>
21 #include <linux/string.h>
22 #include <linux/kernel.h>
23 #include <linux/freezer.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26 #include <linux/blkdev.h>
27 #include <linux/delay.h>
28 
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_dbg.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_driver.h>
34 #include <scsi/scsi_eh.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_ioctl.h>
38 
39 #include "scsi_priv.h"
40 #include "scsi_logging.h"
41 #include "scsi_transport_api.h"
42 
43 #include <trace/events/scsi.h>
44 
45 static void scsi_eh_done(struct scsi_cmnd *scmd);
46 
47 #define SENSE_TIMEOUT (10*HZ)
48 
49 /*
50  * These should *probably* be handled by the host itself.
51  * Since it is allowed to sleep, it probably should.
52  */
53 #define BUS_RESET_SETTLE_TIME (10)
54 #define HOST_RESET_SETTLE_TIME (10)
55 
56 static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
57 
58 /* called with shost->host_lock held */
60 {
61  if (shost->host_busy == shost->host_failed) {
62  trace_scsi_eh_wakeup(shost);
63  wake_up_process(shost->ehandler);
65  printk("Waking error handler thread\n"));
66  }
67 }
68 
76 {
77  unsigned long flags;
78 
79  spin_lock_irqsave(shost->host_lock, flags);
80 
81  if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
83  shost->host_eh_scheduled++;
84  scsi_eh_wakeup(shost);
85  }
86 
87  spin_unlock_irqrestore(shost->host_lock, flags);
88 }
90 
99 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
100 {
101  struct Scsi_Host *shost = scmd->device->host;
102  unsigned long flags;
103  int ret = 0;
104 
105  if (!shost->ehandler)
106  return 0;
107 
108  spin_lock_irqsave(shost->host_lock, flags);
111  goto out_unlock;
112 
113  ret = 1;
114  scmd->eh_eflags |= eh_flag;
115  list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
116  shost->host_failed++;
117  scsi_eh_wakeup(shost);
118  out_unlock:
119  spin_unlock_irqrestore(shost->host_lock, flags);
120  return ret;
121 }
122 
133 enum blk_eh_timer_return scsi_times_out(struct request *req)
134 {
135  struct scsi_cmnd *scmd = req->special;
136  enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
137  struct Scsi_Host *host = scmd->device->host;
138 
139  trace_scsi_dispatch_cmd_timeout(scmd);
140  scsi_log_completion(scmd, TIMEOUT_ERROR);
141 
142  if (host->transportt->eh_timed_out)
143  rtn = host->transportt->eh_timed_out(scmd);
144  else if (host->hostt->eh_timed_out)
145  rtn = host->hostt->eh_timed_out(scmd);
146 
147  scmd->result |= DID_TIME_OUT << 16;
148 
149  if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
151  rtn = BLK_EH_HANDLED;
152 
153  return rtn;
154 }
155 
168 {
169  int online;
170 
171  wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
172 
173  online = scsi_device_online(sdev);
174 
175  SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
176  online));
177 
178  return online;
179 }
181 
182 #ifdef CONFIG_SCSI_LOGGING
183 
188 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
189  struct list_head *work_q)
190 {
191  struct scsi_cmnd *scmd;
192  struct scsi_device *sdev;
193  int total_failures = 0;
194  int cmd_failed = 0;
195  int cmd_cancel = 0;
196  int devices_failed = 0;
197 
198  shost_for_each_device(sdev, shost) {
199  list_for_each_entry(scmd, work_q, eh_entry) {
200  if (scmd->device == sdev) {
201  ++total_failures;
202  if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
203  ++cmd_cancel;
204  else
205  ++cmd_failed;
206  }
207  }
208 
209  if (cmd_cancel || cmd_failed) {
211  sdev_printk(KERN_INFO, sdev,
212  "%s: cmds failed: %d, cancel: %d\n",
213  __func__, cmd_failed,
214  cmd_cancel));
215  cmd_cancel = 0;
216  cmd_failed = 0;
217  ++devices_failed;
218  }
219  }
220 
221  SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
222  " devices require eh work\n",
223  total_failures, devices_failed));
224 }
225 #endif
226 
238 static int scsi_check_sense(struct scsi_cmnd *scmd)
239 {
240  struct scsi_device *sdev = scmd->device;
241  struct scsi_sense_hdr sshdr;
242 
243  if (! scsi_command_normalize_sense(scmd, &sshdr))
244  return FAILED; /* no valid sense data */
245 
246  if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
247  /*
248  * nasty: for mid-layer issued TURs, we need to return the
249  * actual sense data without any recovery attempt. For eh
250  * issued ones, we need to try to recover and interpret
251  */
252  return SUCCESS;
253 
254  if (scsi_sense_is_deferred(&sshdr))
255  return NEEDS_RETRY;
256 
257  if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
258  sdev->scsi_dh_data->scsi_dh->check_sense) {
259  int rc;
260 
261  rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
262  if (rc != SCSI_RETURN_NOT_HANDLED)
263  return rc;
264  /* handler does not care. Drop down to default handling */
265  }
266 
267  /*
268  * Previous logic looked for FILEMARK, EOM or ILI which are
269  * mainly associated with tapes and returned SUCCESS.
270  */
271  if (sshdr.response_code == 0x70) {
272  /* fixed format */
273  if (scmd->sense_buffer[2] & 0xe0)
274  return SUCCESS;
275  } else {
276  /*
277  * descriptor format: look for "stream commands sense data
278  * descriptor" (see SSC-3). Assume single sense data
279  * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
280  */
281  if ((sshdr.additional_length > 3) &&
282  (scmd->sense_buffer[8] == 0x4) &&
283  (scmd->sense_buffer[11] & 0xe0))
284  return SUCCESS;
285  }
286 
287  switch (sshdr.sense_key) {
288  case NO_SENSE:
289  return SUCCESS;
290  case RECOVERED_ERROR:
291  return /* soft_error */ SUCCESS;
292 
293  case ABORTED_COMMAND:
294  if (sshdr.asc == 0x10) /* DIF */
295  return SUCCESS;
296 
297  return NEEDS_RETRY;
298  case NOT_READY:
299  case UNIT_ATTENTION:
300  /*
301  * if we are expecting a cc/ua because of a bus reset that we
302  * performed, treat this just as a retry. otherwise this is
303  * information that we should pass up to the upper-level driver
304  * so that we can deal with it there.
305  */
306  if (scmd->device->expecting_cc_ua) {
307  /*
308  * Because some device does not queue unit
309  * attentions correctly, we carefully check
310  * additional sense code and qualifier so as
311  * not to squash media change unit attention.
312  */
313  if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
314  scmd->device->expecting_cc_ua = 0;
315  return NEEDS_RETRY;
316  }
317  }
318  /*
319  * if the device is in the process of becoming ready, we
320  * should retry.
321  */
322  if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
323  return NEEDS_RETRY;
324  /*
325  * if the device is not started, we need to wake
326  * the error handler to start the motor
327  */
328  if (scmd->device->allow_restart &&
329  (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
330  return FAILED;
331 
332  if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
334  "Warning! Received an indication that the "
335  "LUN assignments on this target have "
336  "changed. The Linux SCSI layer does not "
337  "automatically remap LUN assignments.\n");
338  else if (sshdr.asc == 0x3f)
340  "Warning! Received an indication that the "
341  "operating parameters on this target have "
342  "changed. The Linux SCSI layer does not "
343  "automatically adjust these parameters.\n");
344 
345  if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
347  "Warning! Received an indication that the "
348  "LUN reached a thin provisioning soft "
349  "threshold.\n");
350 
351  /*
352  * Pass the UA upwards for a determination in the completion
353  * functions.
354  */
355  return SUCCESS;
356 
357  /* these are not supported */
358  case COPY_ABORTED:
359  case VOLUME_OVERFLOW:
360  case MISCOMPARE:
361  case BLANK_CHECK:
362  case DATA_PROTECT:
363  return TARGET_ERROR;
364 
365  case MEDIUM_ERROR:
366  if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
367  sshdr.asc == 0x13 || /* AMNF DATA FIELD */
368  sshdr.asc == 0x14) { /* RECORD NOT FOUND */
369  return TARGET_ERROR;
370  }
371  return NEEDS_RETRY;
372 
373  case HARDWARE_ERROR:
374  if (scmd->device->retry_hwerror)
375  return ADD_TO_MLQUEUE;
376  else
377  return TARGET_ERROR;
378 
379  case ILLEGAL_REQUEST:
380  if (sshdr.asc == 0x20 || /* Invalid command operation code */
381  sshdr.asc == 0x21 || /* Logical block address out of range */
382  sshdr.asc == 0x24 || /* Invalid field in cdb */
383  sshdr.asc == 0x26) { /* Parameter value invalid */
384  return TARGET_ERROR;
385  }
386  return SUCCESS;
387 
388  default:
389  return SUCCESS;
390  }
391 }
392 
393 static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
394 {
395  struct scsi_host_template *sht = sdev->host->hostt;
396  struct scsi_device *tmp_sdev;
397 
398  if (!sht->change_queue_depth ||
399  sdev->queue_depth >= sdev->max_queue_depth)
400  return;
401 
402  if (time_before(jiffies,
404  return;
405 
406  if (time_before(jiffies,
408  return;
409 
410  /*
411  * Walk all devices of a target and do
412  * ramp up on them.
413  */
414  shost_for_each_device(tmp_sdev, sdev->host) {
415  if (tmp_sdev->channel != sdev->channel ||
416  tmp_sdev->id != sdev->id ||
417  tmp_sdev->queue_depth == sdev->max_queue_depth)
418  continue;
419  /*
420  * call back into LLD to increase queue_depth by one
421  * with ramp up reason code.
422  */
423  sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
425  sdev->last_queue_ramp_up = jiffies;
426  }
427 }
428 
429 static void scsi_handle_queue_full(struct scsi_device *sdev)
430 {
431  struct scsi_host_template *sht = sdev->host->hostt;
432  struct scsi_device *tmp_sdev;
433 
434  if (!sht->change_queue_depth)
435  return;
436 
437  shost_for_each_device(tmp_sdev, sdev->host) {
438  if (tmp_sdev->channel != sdev->channel ||
439  tmp_sdev->id != sdev->id)
440  continue;
441  /*
442  * We do not know the number of commands that were at
443  * the device when we got the queue full so we start
444  * from the highest possible value and work our way down.
445  */
446  sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1,
448  }
449 }
450 
461 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
462 {
463  /*
464  * first check the host byte, to see if there is anything in there
465  * that would indicate what we need to do.
466  */
467  if (host_byte(scmd->result) == DID_RESET) {
468  /*
469  * rats. we are already in the error handler, so we now
470  * get to try and figure out what to do next. if the sense
471  * is valid, we have a pretty good idea of what to do.
472  * if not, we mark it as FAILED.
473  */
474  return scsi_check_sense(scmd);
475  }
476  if (host_byte(scmd->result) != DID_OK)
477  return FAILED;
478 
479  /*
480  * next, check the message byte.
481  */
482  if (msg_byte(scmd->result) != COMMAND_COMPLETE)
483  return FAILED;
484 
485  /*
486  * now, check the status byte to see if this indicates
487  * anything special.
488  */
489  switch (status_byte(scmd->result)) {
490  case GOOD:
491  scsi_handle_queue_ramp_up(scmd->device);
492  case COMMAND_TERMINATED:
493  return SUCCESS;
494  case CHECK_CONDITION:
495  return scsi_check_sense(scmd);
496  case CONDITION_GOOD:
497  case INTERMEDIATE_GOOD:
498  case INTERMEDIATE_C_GOOD:
499  /*
500  * who knows? FIXME(eric)
501  */
502  return SUCCESS;
504  if (scmd->cmnd[0] == TEST_UNIT_READY)
505  /* it is a success, we probed the device and
506  * found it */
507  return SUCCESS;
508  /* otherwise, we failed to send the command */
509  return FAILED;
510  case QUEUE_FULL:
511  scsi_handle_queue_full(scmd->device);
512  /* fall through */
513  case BUSY:
514  return NEEDS_RETRY;
515  default:
516  return FAILED;
517  }
518  return FAILED;
519 }
520 
525 static void scsi_eh_done(struct scsi_cmnd *scmd)
526 {
527  struct completion *eh_action;
528 
530  printk("%s scmd: %p result: %x\n",
531  __func__, scmd, scmd->result));
532 
533  eh_action = scmd->device->host->eh_action;
534  if (eh_action)
535  complete(eh_action);
536 }
537 
542 static int scsi_try_host_reset(struct scsi_cmnd *scmd)
543 {
544  unsigned long flags;
545  int rtn;
546  struct Scsi_Host *host = scmd->device->host;
547  struct scsi_host_template *hostt = host->hostt;
548 
549  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
550  __func__));
551 
552  if (!hostt->eh_host_reset_handler)
553  return FAILED;
554 
555  rtn = hostt->eh_host_reset_handler(scmd);
556 
557  if (rtn == SUCCESS) {
558  if (!hostt->skip_settle_delay)
559  ssleep(HOST_RESET_SETTLE_TIME);
560  spin_lock_irqsave(host->host_lock, flags);
561  scsi_report_bus_reset(host, scmd_channel(scmd));
562  spin_unlock_irqrestore(host->host_lock, flags);
563  }
564 
565  return rtn;
566 }
567 
572 static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
573 {
574  unsigned long flags;
575  int rtn;
576  struct Scsi_Host *host = scmd->device->host;
577  struct scsi_host_template *hostt = host->hostt;
578 
579  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
580  __func__));
581 
582  if (!hostt->eh_bus_reset_handler)
583  return FAILED;
584 
585  rtn = hostt->eh_bus_reset_handler(scmd);
586 
587  if (rtn == SUCCESS) {
588  if (!hostt->skip_settle_delay)
589  ssleep(BUS_RESET_SETTLE_TIME);
590  spin_lock_irqsave(host->host_lock, flags);
591  scsi_report_bus_reset(host, scmd_channel(scmd));
592  spin_unlock_irqrestore(host->host_lock, flags);
593  }
594 
595  return rtn;
596 }
597 
598 static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
599 {
600  sdev->was_reset = 1;
601  sdev->expecting_cc_ua = 1;
602 }
603 
614 static int scsi_try_target_reset(struct scsi_cmnd *scmd)
615 {
616  unsigned long flags;
617  int rtn;
618  struct Scsi_Host *host = scmd->device->host;
619  struct scsi_host_template *hostt = host->hostt;
620 
621  if (!hostt->eh_target_reset_handler)
622  return FAILED;
623 
624  rtn = hostt->eh_target_reset_handler(scmd);
625  if (rtn == SUCCESS) {
626  spin_lock_irqsave(host->host_lock, flags);
628  __scsi_report_device_reset);
629  spin_unlock_irqrestore(host->host_lock, flags);
630  }
631 
632  return rtn;
633 }
634 
645 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
646 {
647  int rtn;
648  struct scsi_host_template *hostt = scmd->device->host->hostt;
649 
650  if (!hostt->eh_device_reset_handler)
651  return FAILED;
652 
653  rtn = hostt->eh_device_reset_handler(scmd);
654  if (rtn == SUCCESS)
655  __scsi_report_device_reset(scmd->device, NULL);
656  return rtn;
657 }
658 
659 static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
660 {
661  if (!hostt->eh_abort_handler)
662  return FAILED;
663 
664  return hostt->eh_abort_handler(scmd);
665 }
666 
667 static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
668 {
669  if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
670  if (scsi_try_bus_device_reset(scmd) != SUCCESS)
671  if (scsi_try_target_reset(scmd) != SUCCESS)
672  if (scsi_try_bus_reset(scmd) != SUCCESS)
673  scsi_try_host_reset(scmd);
674 }
675 
690 void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
691  unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
692 {
693  struct scsi_device *sdev = scmd->device;
694 
695  /*
696  * We need saved copies of a number of fields - this is because
697  * error handling may need to overwrite these with different values
698  * to run different commands, and once error handling is complete,
699  * we will need to restore these values prior to running the actual
700  * command.
701  */
702  ses->cmd_len = scmd->cmd_len;
703  ses->cmnd = scmd->cmnd;
704  ses->data_direction = scmd->sc_data_direction;
705  ses->sdb = scmd->sdb;
706  ses->next_rq = scmd->request->next_rq;
707  ses->result = scmd->result;
708  ses->underflow = scmd->underflow;
709  ses->prot_op = scmd->prot_op;
710 
711  scmd->prot_op = SCSI_PROT_NORMAL;
712  scmd->cmnd = ses->eh_cmnd;
713  memset(scmd->cmnd, 0, BLK_MAX_CDB);
714  memset(&scmd->sdb, 0, sizeof(scmd->sdb));
715  scmd->request->next_rq = NULL;
716 
717  if (sense_bytes) {
718  scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
719  sense_bytes);
720  sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
721  scmd->sdb.length);
722  scmd->sdb.table.sgl = &ses->sense_sgl;
724  scmd->sdb.table.nents = 1;
725  scmd->cmnd[0] = REQUEST_SENSE;
726  scmd->cmnd[4] = scmd->sdb.length;
727  scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
728  } else {
729  scmd->sc_data_direction = DMA_NONE;
730  if (cmnd) {
731  BUG_ON(cmnd_size > BLK_MAX_CDB);
732  memcpy(scmd->cmnd, cmnd, cmnd_size);
733  scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
734  }
735  }
736 
737  scmd->underflow = 0;
738 
739  if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
740  scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
741  (sdev->lun << 5 & 0xe0);
742 
743  /*
744  * Zero the sense buffer. The scsi spec mandates that any
745  * untransferred sense data should be interpreted as being zero.
746  */
748 }
750 
758 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
759 {
760  /*
761  * Restore original data
762  */
763  scmd->cmd_len = ses->cmd_len;
764  scmd->cmnd = ses->cmnd;
765  scmd->sc_data_direction = ses->data_direction;
766  scmd->sdb = ses->sdb;
767  scmd->request->next_rq = ses->next_rq;
768  scmd->result = ses->result;
769  scmd->underflow = ses->underflow;
770  scmd->prot_op = ses->prot_op;
771 }
773 
788 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
789  int cmnd_size, int timeout, unsigned sense_bytes)
790 {
791  struct scsi_device *sdev = scmd->device;
792  struct Scsi_Host *shost = sdev->host;
794  unsigned long timeleft;
795  struct scsi_eh_save ses;
796  int rtn;
797 
798  scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
799  shost->eh_action = &done;
800 
801  scsi_log_send(scmd);
802  scmd->scsi_done = scsi_eh_done;
803  shost->hostt->queuecommand(shost, scmd);
804 
805  timeleft = wait_for_completion_timeout(&done, timeout);
806 
807  shost->eh_action = NULL;
808 
809  scsi_log_completion(scmd, SUCCESS);
810 
812  printk("%s: scmd: %p, timeleft: %ld\n",
813  __func__, scmd, timeleft));
814 
815  /*
816  * If there is time left scsi_eh_done got called, and we will
817  * examine the actual status codes to see whether the command
818  * actually did complete normally, else tell the host to forget
819  * about this command.
820  */
821  if (timeleft) {
822  rtn = scsi_eh_completed_normally(scmd);
824  printk("%s: scsi_eh_completed_normally %x\n",
825  __func__, rtn));
826 
827  switch (rtn) {
828  case SUCCESS:
829  case NEEDS_RETRY:
830  case FAILED:
831  case TARGET_ERROR:
832  break;
833  case ADD_TO_MLQUEUE:
834  rtn = NEEDS_RETRY;
835  break;
836  default:
837  rtn = FAILED;
838  break;
839  }
840  } else {
841  scsi_abort_eh_cmnd(scmd);
842  rtn = FAILED;
843  }
844 
845  scsi_eh_restore_cmnd(scmd, &ses);
846 
847  if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
848  struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
849  if (sdrv->eh_action)
850  rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
851  }
852 
853  return rtn;
854 }
855 
865 static int scsi_request_sense(struct scsi_cmnd *scmd)
866 {
867  return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
868 }
869 
882 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
883 {
884  scmd->device->host->host_failed--;
885  scmd->eh_eflags = 0;
886  list_move_tail(&scmd->eh_entry, done_q);
887 }
889 
910 int scsi_eh_get_sense(struct list_head *work_q,
911  struct list_head *done_q)
912 {
913  struct scsi_cmnd *scmd, *next;
914  int rtn;
915 
916  list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
917  if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
918  SCSI_SENSE_VALID(scmd))
919  continue;
920 
922  "%s: requesting sense\n",
923  current->comm));
924  rtn = scsi_request_sense(scmd);
925  if (rtn != SUCCESS)
926  continue;
927 
928  SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
929  " result %x\n", scmd,
930  scmd->result));
932 
933  rtn = scsi_decide_disposition(scmd);
934 
935  /*
936  * if the result was normal, then just pass it along to the
937  * upper level.
938  */
939  if (rtn == SUCCESS)
940  /* we don't want this command reissued, just
941  * finished with the sense data, so set
942  * retries to the max allowed to ensure it
943  * won't get reissued */
944  scmd->retries = scmd->allowed;
945  else if (rtn != NEEDS_RETRY)
946  continue;
947 
948  scsi_eh_finish_cmd(scmd, done_q);
949  }
950 
951  return list_empty(work_q);
952 }
954 
962 static int scsi_eh_tur(struct scsi_cmnd *scmd)
963 {
964  static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
965  int retry_cnt = 1, rtn;
966 
967 retry_tur:
968  rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
969 
970  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
971  __func__, scmd, rtn));
972 
973  switch (rtn) {
974  case NEEDS_RETRY:
975  if (retry_cnt--)
976  goto retry_tur;
977  /*FALLTHRU*/
978  case SUCCESS:
979  return 0;
980  default:
981  return 1;
982  }
983 }
984 
998 static int scsi_eh_test_devices(struct list_head *cmd_list,
999  struct list_head *work_q,
1000  struct list_head *done_q, int try_stu)
1001 {
1002  struct scsi_cmnd *scmd, *next;
1003  struct scsi_device *sdev;
1004  int finish_cmds;
1005 
1006  while (!list_empty(cmd_list)) {
1007  scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
1008  sdev = scmd->device;
1009 
1010  finish_cmds = !scsi_device_online(scmd->device) ||
1011  (try_stu && !scsi_eh_try_stu(scmd) &&
1012  !scsi_eh_tur(scmd)) ||
1013  !scsi_eh_tur(scmd);
1014 
1015  list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
1016  if (scmd->device == sdev) {
1017  if (finish_cmds)
1018  scsi_eh_finish_cmd(scmd, done_q);
1019  else
1020  list_move_tail(&scmd->eh_entry, work_q);
1021  }
1022  }
1023  return list_empty(work_q);
1024 }
1025 
1026 
1039 static int scsi_eh_abort_cmds(struct list_head *work_q,
1040  struct list_head *done_q)
1041 {
1042  struct scsi_cmnd *scmd, *next;
1043  LIST_HEAD(check_list);
1044  int rtn;
1045 
1046  list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1047  if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
1048  continue;
1049  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
1050  "0x%p\n", current->comm,
1051  scmd));
1052  rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
1053  if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1054  scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
1055  if (rtn == FAST_IO_FAIL)
1056  scsi_eh_finish_cmd(scmd, done_q);
1057  else
1058  list_move_tail(&scmd->eh_entry, &check_list);
1059  } else
1060  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
1061  " cmd failed:"
1062  "0x%p\n",
1063  current->comm,
1064  scmd));
1065  }
1066 
1067  return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1068 }
1069 
1077 static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1078 {
1079  static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
1080 
1081  if (scmd->device->allow_restart) {
1082  int i, rtn = NEEDS_RETRY;
1083 
1084  for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
1085  rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
1086 
1087  if (rtn == SUCCESS)
1088  return 0;
1089  }
1090 
1091  return 1;
1092 }
1093 
1104 static int scsi_eh_stu(struct Scsi_Host *shost,
1105  struct list_head *work_q,
1106  struct list_head *done_q)
1107 {
1108  struct scsi_cmnd *scmd, *stu_scmd, *next;
1109  struct scsi_device *sdev;
1110 
1111  shost_for_each_device(sdev, shost) {
1112  stu_scmd = NULL;
1113  list_for_each_entry(scmd, work_q, eh_entry)
1114  if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
1115  scsi_check_sense(scmd) == FAILED ) {
1116  stu_scmd = scmd;
1117  break;
1118  }
1119 
1120  if (!stu_scmd)
1121  continue;
1122 
1123  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
1124  " 0x%p\n", current->comm, sdev));
1125 
1126  if (!scsi_eh_try_stu(stu_scmd)) {
1127  if (!scsi_device_online(sdev) ||
1128  !scsi_eh_tur(stu_scmd)) {
1129  list_for_each_entry_safe(scmd, next,
1130  work_q, eh_entry) {
1131  if (scmd->device == sdev)
1132  scsi_eh_finish_cmd(scmd, done_q);
1133  }
1134  }
1135  } else {
1137  printk("%s: START_UNIT failed to sdev:"
1138  " 0x%p\n", current->comm, sdev));
1139  }
1140  }
1141 
1142  return list_empty(work_q);
1143 }
1144 
1145 
1158 static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1159  struct list_head *work_q,
1160  struct list_head *done_q)
1161 {
1162  struct scsi_cmnd *scmd, *bdr_scmd, *next;
1163  struct scsi_device *sdev;
1164  int rtn;
1165 
1166  shost_for_each_device(sdev, shost) {
1167  bdr_scmd = NULL;
1168  list_for_each_entry(scmd, work_q, eh_entry)
1169  if (scmd->device == sdev) {
1170  bdr_scmd = scmd;
1171  break;
1172  }
1173 
1174  if (!bdr_scmd)
1175  continue;
1176 
1177  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
1178  " 0x%p\n", current->comm,
1179  sdev));
1180  rtn = scsi_try_bus_device_reset(bdr_scmd);
1181  if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1182  if (!scsi_device_online(sdev) ||
1183  rtn == FAST_IO_FAIL ||
1184  !scsi_eh_tur(bdr_scmd)) {
1185  list_for_each_entry_safe(scmd, next,
1186  work_q, eh_entry) {
1187  if (scmd->device == sdev)
1188  scsi_eh_finish_cmd(scmd,
1189  done_q);
1190  }
1191  }
1192  } else {
1193  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
1194  " failed sdev:"
1195  "0x%p\n",
1196  current->comm,
1197  sdev));
1198  }
1199  }
1200 
1201  return list_empty(work_q);
1202 }
1203 
1213 static int scsi_eh_target_reset(struct Scsi_Host *shost,
1214  struct list_head *work_q,
1215  struct list_head *done_q)
1216 {
1217  LIST_HEAD(tmp_list);
1218  LIST_HEAD(check_list);
1219 
1220  list_splice_init(work_q, &tmp_list);
1221 
1222  while (!list_empty(&tmp_list)) {
1223  struct scsi_cmnd *next, *scmd;
1224  int rtn;
1225  unsigned int id;
1226 
1227  scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
1228  id = scmd_id(scmd);
1229 
1230  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
1231  "to target %d\n",
1232  current->comm, id));
1233  rtn = scsi_try_target_reset(scmd);
1234  if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
1235  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
1236  " failed target: "
1237  "%d\n",
1238  current->comm, id));
1239  list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
1240  if (scmd_id(scmd) != id)
1241  continue;
1242 
1243  if (rtn == SUCCESS)
1244  list_move_tail(&scmd->eh_entry, &check_list);
1245  else if (rtn == FAST_IO_FAIL)
1246  scsi_eh_finish_cmd(scmd, done_q);
1247  else
1248  /* push back on work queue for further processing */
1249  list_move(&scmd->eh_entry, work_q);
1250  }
1251  }
1252 
1253  return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1254 }
1255 
1262 static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1263  struct list_head *work_q,
1264  struct list_head *done_q)
1265 {
1266  struct scsi_cmnd *scmd, *chan_scmd, *next;
1267  LIST_HEAD(check_list);
1268  unsigned int channel;
1269  int rtn;
1270 
1271  /*
1272  * we really want to loop over the various channels, and do this on
1273  * a channel by channel basis. we should also check to see if any
1274  * of the failed commands are on soft_reset devices, and if so, skip
1275  * the reset.
1276  */
1277 
1278  for (channel = 0; channel <= shost->max_channel; channel++) {
1279  chan_scmd = NULL;
1280  list_for_each_entry(scmd, work_q, eh_entry) {
1281  if (channel == scmd_channel(scmd)) {
1282  chan_scmd = scmd;
1283  break;
1284  /*
1285  * FIXME add back in some support for
1286  * soft_reset devices.
1287  */
1288  }
1289  }
1290 
1291  if (!chan_scmd)
1292  continue;
1293  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
1294  " %d\n", current->comm,
1295  channel));
1296  rtn = scsi_try_bus_reset(chan_scmd);
1297  if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1298  list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1299  if (channel == scmd_channel(scmd)) {
1300  if (rtn == FAST_IO_FAIL)
1301  scsi_eh_finish_cmd(scmd,
1302  done_q);
1303  else
1304  list_move_tail(&scmd->eh_entry,
1305  &check_list);
1306  }
1307  }
1308  } else {
1309  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
1310  " failed chan: %d\n",
1311  current->comm,
1312  channel));
1313  }
1314  }
1315  return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1316 }
1317 
1323 static int scsi_eh_host_reset(struct list_head *work_q,
1324  struct list_head *done_q)
1325 {
1326  struct scsi_cmnd *scmd, *next;
1327  LIST_HEAD(check_list);
1328  int rtn;
1329 
1330  if (!list_empty(work_q)) {
1331  scmd = list_entry(work_q->next,
1332  struct scsi_cmnd, eh_entry);
1333 
1334  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
1335  , current->comm));
1336 
1337  rtn = scsi_try_host_reset(scmd);
1338  if (rtn == SUCCESS) {
1339  list_splice_init(work_q, &check_list);
1340  } else if (rtn == FAST_IO_FAIL) {
1341  list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1342  scsi_eh_finish_cmd(scmd, done_q);
1343  }
1344  } else {
1345  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
1346  " failed\n",
1347  current->comm));
1348  }
1349  }
1350  return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
1351 }
1352 
1358 static void scsi_eh_offline_sdevs(struct list_head *work_q,
1359  struct list_head *done_q)
1360 {
1361  struct scsi_cmnd *scmd, *next;
1362 
1363  list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1364  sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
1365  "not ready after error recovery\n");
1367  if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
1368  /*
1369  * FIXME: Handle lost cmds.
1370  */
1371  }
1372  scsi_eh_finish_cmd(scmd, done_q);
1373  }
1374  return;
1375 }
1376 
1381 int scsi_noretry_cmd(struct scsi_cmnd *scmd)
1382 {
1383  switch (host_byte(scmd->result)) {
1384  case DID_OK:
1385  break;
1386  case DID_BUS_BUSY:
1387  return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
1388  case DID_PARITY:
1389  return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
1390  case DID_ERROR:
1391  if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1393  return 0;
1394  /* fall through */
1395  case DID_SOFT_ERROR:
1396  return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
1397  }
1398 
1399  switch (status_byte(scmd->result)) {
1400  case CHECK_CONDITION:
1401  /*
1402  * assume caller has checked sense and determinted
1403  * the check condition was retryable.
1404  */
1405  if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
1406  scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
1407  return 1;
1408  }
1409 
1410  return 0;
1411 }
1412 
1428 {
1429  int rtn;
1430 
1431  /*
1432  * if the device is offline, then we clearly just pass the result back
1433  * up to the top level.
1434  */
1435  if (!scsi_device_online(scmd->device)) {
1436  SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
1437  " as SUCCESS\n",
1438  __func__));
1439  return SUCCESS;
1440  }
1441 
1442  /*
1443  * first check the host byte, to see if there is anything in there
1444  * that would indicate what we need to do.
1445  */
1446  switch (host_byte(scmd->result)) {
1447  case DID_PASSTHROUGH:
1448  /*
1449  * no matter what, pass this through to the upper layer.
1450  * nuke this special code so that it looks like we are saying
1451  * did_ok.
1452  */
1453  scmd->result &= 0xff00ffff;
1454  return SUCCESS;
1455  case DID_OK:
1456  /*
1457  * looks good. drop through, and check the next byte.
1458  */
1459  break;
1460  case DID_NO_CONNECT:
1461  case DID_BAD_TARGET:
1462  case DID_ABORT:
1463  /*
1464  * note - this means that we just report the status back
1465  * to the top level driver, not that we actually think
1466  * that it indicates SUCCESS.
1467  */
1468  return SUCCESS;
1469  /*
1470  * when the low level driver returns did_soft_error,
1471  * it is responsible for keeping an internal retry counter
1472  * in order to avoid endless loops (db)
1473  *
1474  * actually this is a bug in this function here. we should
1475  * be mindful of the maximum number of retries specified
1476  * and not get stuck in a loop.
1477  */
1478  case DID_SOFT_ERROR:
1479  goto maybe_retry;
1480  case DID_IMM_RETRY:
1481  return NEEDS_RETRY;
1482 
1483  case DID_REQUEUE:
1484  return ADD_TO_MLQUEUE;
1486  /*
1487  * LLD/transport was disrupted during processing of the IO.
1488  * The transport class is now blocked/blocking,
1489  * and the transport will decide what to do with the IO
1490  * based on its timers and recovery capablilities if
1491  * there are enough retries.
1492  */
1493  goto maybe_retry;
1495  /*
1496  * The transport decided to failfast the IO (most likely
1497  * the fast io fail tmo fired), so send IO directly upwards.
1498  */
1499  return SUCCESS;
1500  case DID_ERROR:
1501  if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1503  /*
1504  * execute reservation conflict processing code
1505  * lower down
1506  */
1507  break;
1508  /* fallthrough */
1509  case DID_BUS_BUSY:
1510  case DID_PARITY:
1511  goto maybe_retry;
1512  case DID_TIME_OUT:
1513  /*
1514  * when we scan the bus, we get timeout messages for
1515  * these commands if there is no device available.
1516  * other hosts report did_no_connect for the same thing.
1517  */
1518  if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1519  scmd->cmnd[0] == INQUIRY)) {
1520  return SUCCESS;
1521  } else {
1522  return FAILED;
1523  }
1524  case DID_RESET:
1525  return SUCCESS;
1526  default:
1527  return FAILED;
1528  }
1529 
1530  /*
1531  * next, check the message byte.
1532  */
1533  if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1534  return FAILED;
1535 
1536  /*
1537  * check the status byte to see if this indicates anything special.
1538  */
1539  switch (status_byte(scmd->result)) {
1540  case QUEUE_FULL:
1541  scsi_handle_queue_full(scmd->device);
1542  /*
1543  * the case of trying to send too many commands to a
1544  * tagged queueing device.
1545  */
1546  case BUSY:
1547  /*
1548  * device can't talk to us at the moment. Should only
1549  * occur (SAM-3) when the task queue is empty, so will cause
1550  * the empty queue handling to trigger a stall in the
1551  * device.
1552  */
1553  return ADD_TO_MLQUEUE;
1554  case GOOD:
1555  scsi_handle_queue_ramp_up(scmd->device);
1556  case COMMAND_TERMINATED:
1557  return SUCCESS;
1558  case TASK_ABORTED:
1559  goto maybe_retry;
1560  case CHECK_CONDITION:
1561  rtn = scsi_check_sense(scmd);
1562  if (rtn == NEEDS_RETRY)
1563  goto maybe_retry;
1564  else if (rtn == TARGET_ERROR) {
1565  /*
1566  * Need to modify host byte to signal a
1567  * permanent target failure
1568  */
1569  set_host_byte(scmd, DID_TARGET_FAILURE);
1570  rtn = SUCCESS;
1571  }
1572  /* if rtn == FAILED, we have no sense information;
1573  * returning FAILED will wake the error handler thread
1574  * to collect the sense and redo the decide
1575  * disposition */
1576  return rtn;
1577  case CONDITION_GOOD:
1578  case INTERMEDIATE_GOOD:
1579  case INTERMEDIATE_C_GOOD:
1580  case ACA_ACTIVE:
1581  /*
1582  * who knows? FIXME(eric)
1583  */
1584  return SUCCESS;
1585 
1586  case RESERVATION_CONFLICT:
1587  sdev_printk(KERN_INFO, scmd->device,
1588  "reservation conflict\n");
1589  set_host_byte(scmd, DID_NEXUS_FAILURE);
1590  return SUCCESS; /* causes immediate i/o error */
1591  default:
1592  return FAILED;
1593  }
1594  return FAILED;
1595 
1596  maybe_retry:
1597 
1598  /* we requeue for retry because the error was retryable, and
1599  * the request was not marked fast fail. Note that above,
1600  * even if the request is marked fast fail, we still requeue
1601  * for queue congestion conditions (QUEUE_FULL or BUSY) */
1602  if ((++scmd->retries) <= scmd->allowed
1603  && !scsi_noretry_cmd(scmd)) {
1604  return NEEDS_RETRY;
1605  } else {
1606  /*
1607  * no more retries - report this one back to upper level.
1608  */
1609  return SUCCESS;
1610  }
1611 }
1612 
/**
 * eh_lock_door_done - completion callback for the door-lock request
 * @req:	the completed block layer request
 * @uptodate:	completion status (ignored; the request is dropped either way)
 *
 * Releases the request queued by scsi_eh_lock_door() once the LLD has
 * completed it; no result inspection is needed for this best-effort command.
 */
static void eh_lock_door_done(struct request *req, int uptodate)
{
	__blk_put_request(req->q, req);
}
1617 
1629 static void scsi_eh_lock_door(struct scsi_device *sdev)
1630 {
1631  struct request *req;
1632 
1633  /*
1634  * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1635  * request becomes available
1636  */
1638 
1639  req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1640  req->cmd[1] = 0;
1641  req->cmd[2] = 0;
1642  req->cmd[3] = 0;
1643  req->cmd[4] = SCSI_REMOVAL_PREVENT;
1644  req->cmd[5] = 0;
1645 
1646  req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1647 
1648  req->cmd_type = REQ_TYPE_BLOCK_PC;
1649  req->cmd_flags |= REQ_QUIET;
1650  req->timeout = 10 * HZ;
1651  req->retries = 5;
1652 
1653  blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1654 }
1655 
1664 static void scsi_restart_operations(struct Scsi_Host *shost)
1665 {
1666  struct scsi_device *sdev;
1667  unsigned long flags;
1668 
1669  /*
1670  * If the door was locked, we need to insert a door lock request
1671  * onto the head of the SCSI request queue for the device. There
1672  * is no point trying to lock the door of an off-line device.
1673  */
1674  shost_for_each_device(sdev, shost) {
1675  if (scsi_device_online(sdev) && sdev->locked)
1676  scsi_eh_lock_door(sdev);
1677  }
1678 
1679  /*
1680  * next free up anything directly waiting upon the host. this
1681  * will be requests for character device operations, and also for
1682  * ioctls to queued block devices.
1683  */
1684  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1685  __func__));
1686 
1687  spin_lock_irqsave(shost->host_lock, flags);
1688  if (scsi_host_set_state(shost, SHOST_RUNNING))
1689  if (scsi_host_set_state(shost, SHOST_CANCEL))
1691  spin_unlock_irqrestore(shost->host_lock, flags);
1692 
1693  wake_up(&shost->host_wait);
1694 
1695  /*
1696  * finally we need to re-initiate requests that may be pending. we will
1697  * have had everything blocked while error handling is taking place, and
1698  * now that error recovery is done, we will need to ensure that these
1699  * requests are started.
1700  */
1701  scsi_run_host_queues(shost);
1702 
1703  /*
1704  * if eh is active and host_eh_scheduled is pending we need to re-run
1705  * recovery. we do this check after scsi_run_host_queues() to allow
1706  * everything pent up since the last eh run a chance to make forward
1707  * progress before we sync again. Either we'll immediately re-run
1708  * recovery or scsi_device_unbusy() will wake us again when these
1709  * pending commands complete.
1710  */
1711  spin_lock_irqsave(shost->host_lock, flags);
1712  if (shost->host_eh_scheduled)
1713  if (scsi_host_set_state(shost, SHOST_RECOVERY))
1715  spin_unlock_irqrestore(shost->host_lock, flags);
1716 }
1717 
/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost: 	host to be recovered.
 * @work_q:     &list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Runs the recovery escalation ladder: start-unit, then device, target,
 * bus and host resets.  The first stage that empties @work_q stops the
 * escalation; if none succeeds, the remaining devices are offlined.
 */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	if (scsi_eh_stu(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_device_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_target_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_host_reset(work_q, done_q))
		return;

	/* everything failed: offline whatever is left on the work queue */
	scsi_eh_offline_sdevs(work_q, done_q);
}
1737 
1742 void scsi_eh_flush_done_q(struct list_head *done_q)
1743 {
1744  struct scsi_cmnd *scmd, *next;
1745 
1746  list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
1747  list_del_init(&scmd->eh_entry);
1748  if (scsi_device_online(scmd->device) &&
1749  !scsi_noretry_cmd(scmd) &&
1750  (++scmd->retries <= scmd->allowed)) {
1751  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
1752  " retry cmd: %p\n",
1753  current->comm,
1754  scmd));
1756  } else {
1757  /*
1758  * If just we got sense for the device (called
1759  * scsi_eh_get_sense), scmd->result is already
1760  * set, do not set DRIVER_TIMEOUT.
1761  */
1762  if (!scmd->result)
1763  scmd->result |= (DRIVER_TIMEOUT << 24);
1764  SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
1765  " cmd: %p\n",
1766  current->comm, scmd));
1767  scsi_finish_command(scmd);
1768  }
1769  }
1770 }
1772 
1796 static void scsi_unjam_host(struct Scsi_Host *shost)
1797 {
1798  unsigned long flags;
1799  LIST_HEAD(eh_work_q);
1800  LIST_HEAD(eh_done_q);
1801 
1802  spin_lock_irqsave(shost->host_lock, flags);
1803  list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1804  spin_unlock_irqrestore(shost->host_lock, flags);
1805 
1806  SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1807 
1808  if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1809  if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1810  scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1811 
1812  scsi_eh_flush_done_q(&eh_done_q);
1813 }
1814 
1824 {
1825  struct Scsi_Host *shost = data;
1826 
1827  /*
1828  * We use TASK_INTERRUPTIBLE so that the thread is not
1829  * counted against the load average as a running process.
1830  * We never actually get interrupted because kthread_run
1831  * disables signal delivery for the created thread.
1832  */
1833  while (!kthread_should_stop()) {
1835  if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1836  shost->host_failed != shost->host_busy) {
1838  printk("Error handler scsi_eh_%d sleeping\n",
1839  shost->host_no));
1840  schedule();
1841  continue;
1842  }
1843 
1846  printk("Error handler scsi_eh_%d waking up\n",
1847  shost->host_no));
1848 
1849  /*
1850  * We have a host that is failing for some reason. Figure out
1851  * what we need to do to get it up and online again (if we can).
1852  * If we fail, we end up taking the thing offline.
1853  */
1854  if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
1856  printk(KERN_ERR "Error handler scsi_eh_%d "
1857  "unable to autoresume\n",
1858  shost->host_no));
1859  continue;
1860  }
1861 
1862  if (shost->transportt->eh_strategy_handler)
1863  shost->transportt->eh_strategy_handler(shost);
1864  else
1865  scsi_unjam_host(shost);
1866 
1867  /*
1868  * Note - if the above fails completely, the action is to take
1869  * individual devices offline and flush the queue of any
1870  * outstanding requests that may have been pending. When we
1871  * restart, we restart any I/O to any other devices on the bus
1872  * which are still online.
1873  */
1874  scsi_restart_operations(shost);
1875  if (!shost->eh_noresume)
1876  scsi_autopm_put_host(shost);
1877  }
1879 
1881  printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
1882  shost->ehandler = NULL;
1883  return 0;
1884 }
1885 
1886 /*
1887  * Function: scsi_report_bus_reset()
1888  *
1889  * Purpose: Utility function used by low-level drivers to report that
1890  * they have observed a bus reset on the bus being handled.
1891  *
1892  * Arguments: shost - Host in question
1893  * channel - channel on which reset was observed.
1894  *
1895  * Returns: Nothing
1896  *
1897  * Lock status: Host lock must be held.
1898  *
1899  * Notes: This only needs to be called if the reset is one which
1900  * originates from an unknown location. Resets originated
1901  * by the mid-level itself don't need to call this, but there
1902  * should be no harm.
1903  *
1904  * The main purpose of this is to make sure that a CHECK_CONDITION
1905  * is properly treated.
1906  */
1907 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1908 {
1909  struct scsi_device *sdev;
1910 
1911  __shost_for_each_device(sdev, shost) {
1912  if (channel == sdev_channel(sdev))
1913  __scsi_report_device_reset(sdev, NULL);
1914  }
1915 }
1917 
1918 /*
1919  * Function: scsi_report_device_reset()
1920  *
1921  * Purpose: Utility function used by low-level drivers to report that
1922  * they have observed a device reset on the device being handled.
1923  *
1924  * Arguments: shost - Host in question
1925  * channel - channel on which reset was observed
1926  * target - target on which reset was observed
1927  *
1928  * Returns: Nothing
1929  *
1930  * Lock status: Host lock must be held
1931  *
1932  * Notes: This only needs to be called if the reset is one which
1933  * originates from an unknown location. Resets originated
1934  * by the mid-level itself don't need to call this, but there
1935  * should be no harm.
1936  *
1937  * The main purpose of this is to make sure that a CHECK_CONDITION
1938  * is properly treated.
1939  */
1940 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1941 {
1942  struct scsi_device *sdev;
1943 
1944  __shost_for_each_device(sdev, shost) {
1945  if (channel == sdev_channel(sdev) &&
1946  target == sdev_id(sdev))
1947  __scsi_report_device_reset(sdev, NULL);
1948  }
1949 }
1951 
/*
 * Dummy ->scsi_done callback for the synthetic reset command built in
 * scsi_reset_provider(): the command is reclaimed there via
 * scsi_next_command(), so nothing needs to happen at completion time.
 */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}
1956 
1957 /*
1958  * Function: scsi_reset_provider
1959  *
1960  * Purpose: Send requested reset to a bus or device at any phase.
1961  *
1962  * Arguments: device - device to send reset to
1963  * flag - reset type (see scsi.h)
1964  *
1965  * Returns: SUCCESS/FAILURE.
1966  *
1967  * Notes: This is used by the SCSI Generic driver to provide
1968  * Bus/Device reset capability.
1969  */
1970 int
1972 {
1973  struct scsi_cmnd *scmd;
1974  struct Scsi_Host *shost = dev->host;
1975  struct request req;
1976  unsigned long flags;
1977  int rtn;
1978 
1979  if (scsi_autopm_get_host(shost) < 0)
1980  return FAILED;
1981 
1982  scmd = scsi_get_command(dev, GFP_KERNEL);
1983  blk_rq_init(NULL, &req);
1984  scmd->request = &req;
1985 
1986  scmd->cmnd = req.cmd;
1987 
1988  scmd->scsi_done = scsi_reset_provider_done_command;
1989  memset(&scmd->sdb, 0, sizeof(scmd->sdb));
1990 
1991  scmd->cmd_len = 0;
1992 
1994 
1995  spin_lock_irqsave(shost->host_lock, flags);
1996  shost->tmf_in_progress = 1;
1997  spin_unlock_irqrestore(shost->host_lock, flags);
1998 
1999  switch (flag) {
2000  case SCSI_TRY_RESET_DEVICE:
2001  rtn = scsi_try_bus_device_reset(scmd);
2002  if (rtn == SUCCESS)
2003  break;
2004  /* FALLTHROUGH */
2005  case SCSI_TRY_RESET_TARGET:
2006  rtn = scsi_try_target_reset(scmd);
2007  if (rtn == SUCCESS)
2008  break;
2009  /* FALLTHROUGH */
2010  case SCSI_TRY_RESET_BUS:
2011  rtn = scsi_try_bus_reset(scmd);
2012  if (rtn == SUCCESS)
2013  break;
2014  /* FALLTHROUGH */
2015  case SCSI_TRY_RESET_HOST:
2016  rtn = scsi_try_host_reset(scmd);
2017  break;
2018  default:
2019  rtn = FAILED;
2020  }
2021 
2022  spin_lock_irqsave(shost->host_lock, flags);
2023  shost->tmf_in_progress = 0;
2024  spin_unlock_irqrestore(shost->host_lock, flags);
2025 
2026  /*
2027  * be sure to wake up anyone who was sleeping or had their queue
2028  * suspended while we performed the TMF.
2029  */
2031  printk("%s: waking up host to restart after TMF\n",
2032  __func__));
2033 
2034  wake_up(&shost->host_wait);
2035 
2036  scsi_run_host_queues(shost);
2037 
2038  scsi_next_command(scmd);
2039  scsi_autopm_put_host(shost);
2040  return rtn;
2041 }
2043 
2063 int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
2064  struct scsi_sense_hdr *sshdr)
2065 {
2066  if (!sense_buffer || !sb_len)
2067  return 0;
2068 
2069  memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
2070 
2071  sshdr->response_code = (sense_buffer[0] & 0x7f);
2072 
2073  if (!scsi_sense_valid(sshdr))
2074  return 0;
2075 
2076  if (sshdr->response_code >= 0x72) {
2077  /*
2078  * descriptor format
2079  */
2080  if (sb_len > 1)
2081  sshdr->sense_key = (sense_buffer[1] & 0xf);
2082  if (sb_len > 2)
2083  sshdr->asc = sense_buffer[2];
2084  if (sb_len > 3)
2085  sshdr->ascq = sense_buffer[3];
2086  if (sb_len > 7)
2087  sshdr->additional_length = sense_buffer[7];
2088  } else {
2089  /*
2090  * fixed format
2091  */
2092  if (sb_len > 2)
2093  sshdr->sense_key = (sense_buffer[2] & 0xf);
2094  if (sb_len > 7) {
2095  sb_len = (sb_len < (sense_buffer[7] + 8)) ?
2096  sb_len : (sense_buffer[7] + 8);
2097  if (sb_len > 12)
2098  sshdr->asc = sense_buffer[12];
2099  if (sb_len > 13)
2100  sshdr->ascq = sense_buffer[13];
2101  }
2102  }
2103 
2104  return 1;
2105 }
2107 
2109  struct scsi_sense_hdr *sshdr)
2110 {
2111  return scsi_normalize_sense(cmd->sense_buffer,
2112  SCSI_SENSE_BUFFERSIZE, sshdr);
2113 }
2115 
/**
 * scsi_sense_desc_find - search for a given descriptor type in
 *			descriptor-format sense data.
 * @sense_buffer:	byte array of descriptor format sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @desc_type:		descriptor type to find (e.g. 0 -> information)
 *
 * Notes:
 *	only meaningful when the sense data is in descriptor format
 *	(response codes 0x72/0x73).
 *
 * Return value:
 *	pointer to the start of the (first) matching descriptor, else NULL
 */
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	/* need the 8-byte header and a nonzero additional sense length */
	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	/* descriptor-format response codes are 0x72 and 0x73 only */
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	/* clamp the walk to the bytes actually present in the buffer */
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	/* each descriptor: byte 0 = type, byte 1 = additional length,
	 * total size = additional length + 2 */
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		/* -1 marks a truncated descriptor whose length byte is
		 * outside the valid range; its type is still checked once */
		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
		desc_len = add_len + 2;
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) // short descriptor ??
			break;
	}
	return NULL;
}
2154 
2165 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
2166  u64 * info_out)
2167 {
2168  int j;
2169  const u8 * ucp;
2170  u64 ull;
2171 
2172  if (sb_len < 7)
2173  return 0;
2174  switch (sense_buffer[0] & 0x7f) {
2175  case 0x70:
2176  case 0x71:
2177  if (sense_buffer[0] & 0x80) {
2178  *info_out = (sense_buffer[3] << 24) +
2179  (sense_buffer[4] << 16) +
2180  (sense_buffer[5] << 8) + sense_buffer[6];
2181  return 1;
2182  } else
2183  return 0;
2184  case 0x72:
2185  case 0x73:
2186  ucp = scsi_sense_desc_find(sense_buffer, sb_len,
2187  0 /* info desc */);
2188  if (ucp && (0xa == ucp[1])) {
2189  ull = 0;
2190  for (j = 0; j < 8; ++j) {
2191  if (j > 0)
2192  ull <<= 8;
2193  ull |= ucp[4 + j];
2194  }
2195  *info_out = ull;
2196  return 1;
2197  } else
2198  return 0;
2199  default:
2200  return 0;
2201  }
2202 }
2204 
2216 {
2217  if (desc) {
2218  buf[0] = 0x72; /* descriptor, current */
2219  buf[1] = key;
2220  buf[2] = asc;
2221  buf[3] = ascq;
2222  buf[7] = 0;
2223  } else {
2224  buf[0] = 0x70; /* fixed, current */
2225  buf[2] = key;
2226  buf[7] = 0xa;
2227  buf[12] = asc;
2228  buf[13] = ascq;
2229  }
2230 }