Linux Kernel 3.7.1
scsi_lib.c
1 /*
2  * scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  * SCSI queueing library.
5  * Initial versions: Eric Youngdale ([email protected]).
6  * Based upon conversations with large numbers
7  * of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/bitops.h>
12 #include <linux/blkdev.h>
13 #include <linux/completion.h>
14 #include <linux/kernel.h>
15 #include <linux/export.h>
16 #include <linux/mempool.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/hardirq.h>
22 #include <linux/scatterlist.h>
23 
24 #include <scsi/scsi.h>
25 #include <scsi/scsi_cmnd.h>
26 #include <scsi/scsi_dbg.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_driver.h>
29 #include <scsi/scsi_eh.h>
30 #include <scsi/scsi_host.h>
31 
32 #include "scsi_priv.h"
33 #include "scsi_logging.h"
34 
35 
36 #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
37 #define SG_MEMPOOL_SIZE 2
38 
39 struct scsi_host_sg_pool {
40  size_t size;
41  char *name;
42  struct kmem_cache *slab;
43  mempool_t *pool;
44 };
45 
46 #define SP(x) { x, "sgpool-" __stringify(x) }
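/* For example, SP(8) expands to { 8, "sgpool-8" }; the slab and pool
 * members are filled in later by scsi_init_queue(). */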
47 #if (SCSI_MAX_SG_SEGMENTS < 32)
48 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
49 #endif
50 static struct scsi_host_sg_pool scsi_sg_pools[] = {
51  SP(8),
52  SP(16),
53 #if (SCSI_MAX_SG_SEGMENTS > 32)
54  SP(32),
55 #if (SCSI_MAX_SG_SEGMENTS > 64)
56  SP(64),
57 #if (SCSI_MAX_SG_SEGMENTS > 128)
58  SP(128),
59 #if (SCSI_MAX_SG_SEGMENTS > 256)
60 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
61 #endif
62 #endif
63 #endif
64 #endif
65  SP(SCSI_MAX_SG_SEGMENTS)
66 };
67 #undef SP
68 
69 struct kmem_cache *scsi_sdb_cache;
70 
71 #ifdef CONFIG_ACPI
72 #include <acpi/acpi_bus.h>
73 
74 int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
75 {
76  bus->bus = &scsi_bus_type;
77  return register_acpi_bus_type(bus);
78 }
79 EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
80 
81 void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
82 {
83  unregister_acpi_bus_type(bus);
84 }
85 EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
86 #endif
87 
88 /*
89  * When to reinvoke queueing after a resource shortage. It's 3 msecs to
90  * not change behaviour from the previous unplug mechanism; experimentation
91  * may prove this needs changing.
92  */
93 #define SCSI_QUEUE_DELAY 3
94 
95 /*
96  * Function: scsi_unprep_request()
97  *
98  * Purpose: Remove all preparation done for a request, including its
99  * associated scsi_cmnd, so that it can be requeued.
100  *
101  * Arguments: req - request to unprepare
102  *
103  * Lock status: Assumed that no locks are held upon entry.
104  *
105  * Returns: Nothing.
106  */
107 static void scsi_unprep_request(struct request *req)
108 {
109  struct scsi_cmnd *cmd = req->special;
110 
111  blk_unprep_request(req);
112  req->special = NULL;
113 
114  scsi_put_command(cmd);
115 }
116 
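/*
 * __scsi_queue_insert - requeue @cmd at the head of its queue.  @unbusy
 * selects whether the device busy count is dropped here: the public
 * scsi_queue_insert() below passes 1 because it runs before completion,
 * while the retry paths in scsi_io_completion() pass 0 since the command
 * has already been unbusied by the completion path.
 */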
129 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
130 {
131  struct Scsi_Host *host = cmd->device->host;
132  struct scsi_device *device = cmd->device;
133  struct scsi_target *starget = scsi_target(device);
134  struct request_queue *q = device->request_queue;
135  unsigned long flags;
136 
137  SCSI_LOG_MLQUEUE(1,
138  printk("Inserting command %p into mlqueue\n", cmd));
139 
140  /*
141  * Set the appropriate busy bit for the device/host.
142  *
143  * If the host/device isn't busy, assume that something actually
144  * completed, and that we should be able to queue a command now.
145  *
146  * Note that the prior mid-layer assumption that any host could
147  * always queue at least one command is now broken. The mid-layer
148  * will implement a user specifiable stall (see
149  * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
150  * if a command is requeued with no other commands outstanding
151  * either for the device or for the host.
152  */
153  switch (reason) {
154  case SCSI_MLQUEUE_HOST_BUSY:
155  host->host_blocked = host->max_host_blocked;
156  break;
157  case SCSI_MLQUEUE_DEVICE_BUSY:
158  case SCSI_MLQUEUE_EH_RETRY:
159  device->device_blocked = device->max_device_blocked;
160  break;
161  case SCSI_MLQUEUE_TARGET_BUSY:
162  starget->target_blocked = starget->max_target_blocked;
163  break;
164  }
165 
166  /*
167  * Decrement the counters, since these commands are no longer
168  * active on the host/device.
169  */
170  if (unbusy)
171  scsi_device_unbusy(device);
172 
173  /*
174  * Requeue this command. It will go before all other commands
175  * that are already in the queue. Schedule requeue work under
176  * lock such that the kblockd_schedule_work() call happens
177  * before blk_cleanup_queue() finishes.
178  */
179  spin_lock_irqsave(q->queue_lock, flags);
180  blk_requeue_request(q, cmd->request);
181  kblockd_schedule_work(q, &device->requeue_work);
182  spin_unlock_irqrestore(q->queue_lock, flags);
183 }
184 
185 /*
186  * Function: scsi_queue_insert()
187  *
188  * Purpose: Insert a command in the midlevel queue.
189  *
190  * Arguments: cmd - command that we are adding to queue.
191  * reason - why we are inserting command to queue.
192  *
193  * Lock status: Assumed that lock is not held upon entry.
194  *
195  * Returns: Nothing.
196  *
197  * Notes: We do this for one of two cases. Either the host is busy
198  * and it cannot accept any more commands for the time being,
199  * or the device returned QUEUE_FULL and can accept no more
200  * commands.
201  * Notes: This could be called either from an interrupt context or a
202  * normal process context.
203  */
204 void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
205 {
206  __scsi_queue_insert(cmd, reason, 1);
207 }
224 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
225  int data_direction, void *buffer, unsigned bufflen,
226  unsigned char *sense, int timeout, int retries, int flags,
227  int *resid)
228 {
229  struct request *req;
230  int write = (data_direction == DMA_TO_DEVICE);
231  int ret = DRIVER_ERROR << 24;
232 
233  req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
234  if (!req)
235  return ret;
236 
237  if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
238  buffer, bufflen, __GFP_WAIT))
239  goto out;
240 
241  req->cmd_len = COMMAND_SIZE(cmd[0]);
242  memcpy(req->cmd, cmd, req->cmd_len);
243  req->sense = sense;
244  req->sense_len = 0;
245  req->retries = retries;
246  req->timeout = timeout;
247  req->cmd_type = REQ_TYPE_BLOCK_PC;
248  req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
249 
250  /*
251  * head injection *required* here otherwise quiesce won't work
252  */
253  blk_execute_rq(req->q, NULL, req, 1);
254 
255  /*
256  * Some devices (USB mass-storage in particular) may transfer
257  * garbage data together with a residue indicating that the data
258  * is invalid. Prevent the garbage from being misinterpreted
259  * and prevent security leaks by zeroing out the excess data.
260  */
261  if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
262  memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
263 
264  if (resid)
265  *resid = req->resid_len;
266  ret = req->errors;
267  out:
268  blk_put_request(req);
269 
270  return ret;
271 }
272 EXPORT_SYMBOL(scsi_execute);
273 
274 
275 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
276  int data_direction, void *buffer, unsigned bufflen,
277  struct scsi_sense_hdr *sshdr, int timeout, int retries,
278  int *resid)
279 {
280  char *sense = NULL;
281  int result;
282 
283  if (sshdr) {
284  sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
285  if (!sense)
286  return DRIVER_ERROR << 24;
287  }
288  result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
289  sense, timeout, retries, 0, resid);
290  if (sshdr)
291  scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
292 
293  kfree(sense);
294  return result;
295 }
296 EXPORT_SYMBOL(scsi_execute_req);
297 
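/*
 * Illustrative sketch (not part of the original file): a typical caller
 * issues a CDB with no data transfer through scsi_execute_req() much as
 * scsi_test_unit_ready() further down in this file does; the timeout and
 * retry count here are illustrative only:
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *				  &sshdr, 30 * HZ, 3, NULL);
 *	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
 *		sdev->changed = 1;
 */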
298 /*
299  * Function: scsi_init_cmd_errh()
300  *
301  * Purpose: Initialize cmd fields related to error handling.
302  *
303  * Arguments: cmd - command that is ready to be queued.
304  *
305  * Notes: This function has the job of initializing a number of
306  * fields related to error handling. Typically this will
307  * be called once for each command, as required.
308  */
309 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
310 {
311  cmd->serial_number = 0;
312  scsi_set_resid(cmd, 0);
313  memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
314  if (cmd->cmd_len == 0)
315  cmd->cmd_len = scsi_command_size(cmd->cmnd);
316 }
317 
318 void scsi_device_unbusy(struct scsi_device *sdev)
319 {
320  struct Scsi_Host *shost = sdev->host;
321  struct scsi_target *starget = scsi_target(sdev);
322  unsigned long flags;
323 
324  spin_lock_irqsave(shost->host_lock, flags);
325  shost->host_busy--;
326  starget->target_busy--;
327  if (unlikely(scsi_host_in_recovery(shost) &&
328  (shost->host_failed || shost->host_eh_scheduled)))
329  scsi_eh_wakeup(shost);
330  spin_unlock(shost->host_lock);
331  spin_lock(sdev->request_queue->queue_lock);
332  sdev->device_busy--;
333  spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
334 }
335 
336 /*
337  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
338  * and call blk_run_queue for all the scsi_devices on the target -
339  * including current_sdev first.
340  *
341  * Called with *no* scsi locks held.
342  */
343 static void scsi_single_lun_run(struct scsi_device *current_sdev)
344 {
345  struct Scsi_Host *shost = current_sdev->host;
346  struct scsi_device *sdev, *tmp;
347  struct scsi_target *starget = scsi_target(current_sdev);
348  unsigned long flags;
349 
350  spin_lock_irqsave(shost->host_lock, flags);
351  starget->starget_sdev_user = NULL;
352  spin_unlock_irqrestore(shost->host_lock, flags);
353 
354  /*
355  * Call blk_run_queue for all LUNs on the target, starting with
356  * current_sdev. We race with others (to set starget_sdev_user),
357  * but in most cases, we will be first. Ideally, each LU on the
358  * target would get some limited time or requests on the target.
359  */
360  blk_run_queue(current_sdev->request_queue);
361 
362  spin_lock_irqsave(shost->host_lock, flags);
363  if (starget->starget_sdev_user)
364  goto out;
365  list_for_each_entry_safe(sdev, tmp, &starget->devices,
366  same_target_siblings) {
367  if (sdev == current_sdev)
368  continue;
369  if (scsi_device_get(sdev))
370  continue;
371 
372  spin_unlock_irqrestore(shost->host_lock, flags);
373  blk_run_queue(sdev->request_queue);
374  spin_lock_irqsave(shost->host_lock, flags);
375 
376  scsi_device_put(sdev);
377  }
378  out:
379  spin_unlock_irqrestore(shost->host_lock, flags);
380 }
381 
382 static inline int scsi_device_is_busy(struct scsi_device *sdev)
383 {
384  if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
385  return 1;
386 
387  return 0;
388 }
389 
390 static inline int scsi_target_is_busy(struct scsi_target *starget)
391 {
392  return ((starget->can_queue > 0 &&
393  starget->target_busy >= starget->can_queue) ||
394  starget->target_blocked);
395 }
396 
397 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
398 {
399  if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
400  shost->host_blocked || shost->host_self_blocked)
401  return 1;
402 
403  return 0;
404 }
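/*
 * The three *_is_busy() predicates above are what scsi_run_queue() and
 * scsi_request_fn() consult before dispatching: a device is throttled by
 * its queue_depth and device_blocked, while targets and hosts are
 * throttled by their can_queue limits and the *_blocked counters set in
 * __scsi_queue_insert().
 */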
405 
406 /*
407  * Function: scsi_run_queue()
408  *
409  * Purpose: Select a proper request queue to serve next
410  *
411  * Arguments: q - last request's queue
412  *
413  * Returns: Nothing
414  *
415  * Notes: The previous command was completely finished, start
416  * a new one if possible.
417  */
418 static void scsi_run_queue(struct request_queue *q)
419 {
420  struct scsi_device *sdev = q->queuedata;
421  struct Scsi_Host *shost;
422  LIST_HEAD(starved_list);
423  unsigned long flags;
424 
425  shost = sdev->host;
426  if (scsi_target(sdev)->single_lun)
427  scsi_single_lun_run(sdev);
428 
429  spin_lock_irqsave(shost->host_lock, flags);
430  list_splice_init(&shost->starved_list, &starved_list);
431 
432  while (!list_empty(&starved_list)) {
433  /*
434  * As long as shost is accepting commands and we have
435  * starved queues, call blk_run_queue. scsi_request_fn
436  * drops the queue_lock and can add us back to the
437  * starved_list.
438  *
439  * host_lock protects the starved_list and starved_entry.
440  * scsi_request_fn must get the host_lock before checking
441  * or modifying starved_list or starved_entry.
442  */
443  if (scsi_host_is_busy(shost))
444  break;
445 
446  sdev = list_entry(starved_list.next,
447  struct scsi_device, starved_entry);
448  list_del_init(&sdev->starved_entry);
449  if (scsi_target_is_busy(scsi_target(sdev))) {
450  list_move_tail(&sdev->starved_entry,
451  &shost->starved_list);
452  continue;
453  }
454 
455  spin_unlock(shost->host_lock);
456  spin_lock(sdev->request_queue->queue_lock);
457  __blk_run_queue(sdev->request_queue);
458  spin_unlock(sdev->request_queue->queue_lock);
459  spin_lock(shost->host_lock);
460  }
461  /* put any unprocessed entries back */
462  list_splice(&starved_list, &shost->starved_list);
463  spin_unlock_irqrestore(shost->host_lock, flags);
464 
465  blk_run_queue(q);
466 }
467 
468 void scsi_requeue_run_queue(struct work_struct *work)
469 {
470  struct scsi_device *sdev;
471  struct request_queue *q;
472 
473  sdev = container_of(work, struct scsi_device, requeue_work);
474  q = sdev->request_queue;
475  scsi_run_queue(q);
476 }
477 
478 /*
479  * Function: scsi_requeue_command()
480  *
481  * Purpose: Handle post-processing of completed commands.
482  *
483  * Arguments: q - queue to operate on
484  * cmd - command that may need to be requeued.
485  *
486  * Returns: Nothing
487  *
488  * Notes: After command completion, there may be blocks left
489  * over which weren't finished by the previous command
490  * this can be for a number of reasons - the main one is
491  * I/O errors in the middle of the request, in which case
492  * we need to request the blocks that come after the bad
493  * sector.
494  * Notes: Upon return, cmd is a stale pointer.
495  */
496 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
497 {
498  struct scsi_device *sdev = cmd->device;
499  struct request *req = cmd->request;
500  unsigned long flags;
501 
502  /*
503  * We need to hold a reference on the device to avoid the queue being
504  * killed after the unlock and before scsi_run_queue is invoked which
505  * may happen because scsi_unprep_request() puts the command which
506  * releases its reference on the device.
507  */
508  get_device(&sdev->sdev_gendev);
509 
510  spin_lock_irqsave(q->queue_lock, flags);
511  scsi_unprep_request(req);
512  blk_requeue_request(q, req);
513  spin_unlock_irqrestore(q->queue_lock, flags);
514 
515  scsi_run_queue(q);
516 
517  put_device(&sdev->sdev_gendev);
518 }
519 
520 void scsi_next_command(struct scsi_cmnd *cmd)
521 {
522  struct scsi_device *sdev = cmd->device;
523  struct request_queue *q = sdev->request_queue;
524 
525  /* need to hold a reference on the device before we let go of the cmd */
526  get_device(&sdev->sdev_gendev);
527 
528  scsi_put_command(cmd);
529  scsi_run_queue(q);
530 
531  /* ok to remove device now */
532  put_device(&sdev->sdev_gendev);
533 }
534 
535 void scsi_run_host_queues(struct Scsi_Host *shost)
536 {
537  struct scsi_device *sdev;
538 
539  shost_for_each_device(sdev, shost)
540  scsi_run_queue(sdev->request_queue);
541 }
542 
543 static void __scsi_release_buffers(struct scsi_cmnd *, int);
544 
545 /*
546  * Function: scsi_end_request()
547  *
548  * Purpose: Post-processing of completed commands (usually invoked at end
549  * of upper level post-processing and scsi_io_completion).
550  *
551  * Arguments: cmd - command that is complete.
552  * error - 0 if I/O indicates success, < 0 for I/O error.
553  * bytes - number of bytes of completed I/O
554  * requeue - indicates whether we should requeue leftovers.
555  *
556  * Lock status: Assumed that lock is not held upon entry.
557  *
558  * Returns: cmd if requeue required, NULL otherwise.
559  *
560  * Notes: This is called for block device requests in order to
561  * mark some number of sectors as complete.
562  *
563  * We are guaranteeing that the request queue will be goosed
564  * at some point during this call.
565  * Notes: If cmd was requeued, upon return it will be a stale pointer.
566  */
567 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
568  int bytes, int requeue)
569 {
570  struct request_queue *q = cmd->device->request_queue;
571  struct request *req = cmd->request;
572 
573  /*
574  * If there are blocks left over at the end, set up the command
575  * to queue the remainder of them.
576  */
577  if (blk_end_request(req, error, bytes)) {
578  /* kill remainder if no retries */
579  if (error && scsi_noretry_cmd(cmd))
580  blk_end_request_all(req, error);
581  else {
582  if (requeue) {
583  /*
584  * Bleah. Leftovers again. Stick the
585  * leftovers in the front of the
586  * queue, and goose the queue again.
587  */
588  scsi_release_buffers(cmd);
589  scsi_requeue_command(q, cmd);
590  cmd = NULL;
591  }
592  return cmd;
593  }
594  }
595 
596  /*
597  * This will goose the queue request function at the end, so we don't
598  * need to worry about launching another command.
599  */
600  __scsi_release_buffers(cmd, 0);
601  scsi_next_command(cmd);
602  return NULL;
603 }
604 
605 static inline unsigned int scsi_sgtable_index(unsigned short nents)
606 {
607  unsigned int index;
608 
609  BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
610 
611  if (nents <= 8)
612  index = 0;
613  else
614  index = get_count_order(nents) - 3;
615 
616  return index;
617 }
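/*
 * For illustration: with the pools declared above, nents 1-8 selects
 * index 0 (sgpool-8), 9-16 index 1 (sgpool-16), 17-32 index 2, 33-64
 * index 3 and 65-128 index 4, since get_count_order(n) is the smallest
 * k with 2^k >= n.
 */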
618 
619 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
620 {
621  struct scsi_host_sg_pool *sgp;
622 
623  sgp = scsi_sg_pools + scsi_sgtable_index(nents);
624  mempool_free(sgl, sgp->pool);
625 }
626 
627 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
628 {
629  struct scsi_host_sg_pool *sgp;
630 
631  sgp = scsi_sg_pools + scsi_sgtable_index(nents);
632  return mempool_alloc(sgp->pool, gfp_mask);
633 }
634 
635 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
636  gfp_t gfp_mask)
637 {
638  int ret;
639 
640  BUG_ON(!nents);
641 
642  ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
643  gfp_mask, scsi_sg_alloc);
644  if (unlikely(ret))
645  __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
646  scsi_sg_free);
647 
648  return ret;
649 }
650 
651 static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
652 {
653  __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
654 }
655 
656 static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
657 {
658 
659  if (cmd->sdb.table.nents)
660  scsi_free_sgtable(&cmd->sdb);
661 
662  memset(&cmd->sdb, 0, sizeof(cmd->sdb));
663 
664  if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
665  struct scsi_data_buffer *bidi_sdb =
666  cmd->request->next_rq->special;
667  scsi_free_sgtable(bidi_sdb);
668  kmem_cache_free(scsi_sdb_cache, bidi_sdb);
669  cmd->request->next_rq->special = NULL;
670  }
671 
672  if (scsi_prot_sg_count(cmd))
673  scsi_free_sgtable(cmd->prot_sdb);
674 }
675 
676 /*
677  * Function: scsi_release_buffers()
678  *
679  * Purpose: Completion processing for block device I/O requests.
680  *
681  * Arguments: cmd - command that we are bailing.
682  *
683  * Lock status: Assumed that no lock is held upon entry.
684  *
685  * Returns: Nothing
686  *
687  * Notes: In the event that an upper level driver rejects a
688  * command, we must release resources allocated during
689  * the __init_io() function. Primarily this would involve
690  * the scatter-gather table, and potentially any bounce
691  * buffers.
692  */
693 void scsi_release_buffers(struct scsi_cmnd *cmd)
694 {
695  __scsi_release_buffers(cmd, 1);
696 }
697 EXPORT_SYMBOL(scsi_release_buffers);
698 
699 static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
700 {
701  int error = 0;
702 
703  switch(host_byte(result)) {
704  case DID_TRANSPORT_FAILFAST:
705  error = -ENOLINK;
706  break;
707  case DID_TARGET_FAILURE:
708  set_host_byte(cmd, DID_OK);
709  error = -EREMOTEIO;
710  break;
711  case DID_NEXUS_FAILURE:
712  set_host_byte(cmd, DID_OK);
713  error = -EBADE;
714  break;
715  default:
716  error = -EIO;
717  break;
718  }
719 
720  return error;
721 }
722 
723 /*
724  * Function: scsi_io_completion()
725  *
726  * Purpose: Completion processing for block device I/O requests.
727  *
728  * Arguments: cmd - command that is finished.
729  *
730  * Lock status: Assumed that no lock is held upon entry.
731  *
732  * Returns: Nothing
733  *
734  * Notes: This function is matched in terms of capabilities to
735  * the function that created the scatter-gather list.
736  * In other words, if there are no bounce buffers
737  * (the normal case for most drivers), we don't need
738  * the logic to deal with cleaning up afterwards.
739  *
740  * We must call scsi_end_request(). This will finish off
741  * the specified number of sectors. If we are done, the
742  * command block will be released and the queue function
743  * will be goosed. If we are not done then we have to
744  * figure out what to do next:
745  *
746  * a) We can call scsi_requeue_command(). The request
747  * will be unprepared and put back on the queue. Then
748  * a new command will be created for it. This should
749  * be used if we made forward progress, or if we want
750  * to switch from READ(10) to READ(6) for example.
751  *
752  * b) We can call scsi_queue_insert(). The request will
753  * be put back on the queue and retried using the same
754  * command as before, possibly after a delay.
755  *
756  * c) We can call blk_end_request() with -EIO to fail
757  * the remainder of the request.
758  */
759 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
760 {
761  int result = cmd->result;
762  struct request_queue *q = cmd->device->request_queue;
763  struct request *req = cmd->request;
764  int error = 0;
765  struct scsi_sense_hdr sshdr;
766  int sense_valid = 0;
767  int sense_deferred = 0;
768  enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
769  ACTION_DELAYED_RETRY} action;
770  char *description = NULL;
771 
772  if (result) {
773  sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
774  if (sense_valid)
775  sense_deferred = scsi_sense_is_deferred(&sshdr);
776  }
777 
778  if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
779  if (result) {
780  if (sense_valid && req->sense) {
781  /*
782  * SG_IO wants current and deferred errors
783  */
784  int len = 8 + cmd->sense_buffer[7];
785 
786  if (len > SCSI_SENSE_BUFFERSIZE)
787  len = SCSI_SENSE_BUFFERSIZE;
788  memcpy(req->sense, cmd->sense_buffer, len);
789  req->sense_len = len;
790  }
791  if (!sense_deferred)
792  error = __scsi_error_from_host_byte(cmd, result);
793  }
794  /*
795  * __scsi_error_from_host_byte may have reset the host_byte
796  */
797  req->errors = cmd->result;
798 
799  req->resid_len = scsi_get_resid(cmd);
800 
801  if (scsi_bidi_cmnd(cmd)) {
802  /*
803  * Bidi commands must be completed as a whole,
804  * both sides at once.
805  */
806  req->next_rq->resid_len = scsi_in(cmd)->resid;
807 
808  scsi_release_buffers(cmd);
809  blk_end_request_all(req, 0);
810 
811  scsi_next_command(cmd);
812  return;
813  }
814  }
815 
816  /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
817  BUG_ON(blk_bidi_rq(req));
818 
819  /*
820  * Next deal with any sectors which we were able to correctly
821  * handle.
822  */
823  SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
824  "%d bytes done.\n",
825  blk_rq_sectors(req), good_bytes));
826 
827  /*
828  * Recovered errors need reporting, but they're always treated
829  * as success, so fiddle the result code here. For BLOCK_PC
830  * we already took a copy of the original into rq->errors which
831  * is what gets returned to the user
832  */
833  if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
834  /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
835  * print since caller wants ATA registers. Only occurs on
836  * SCSI ATA PASS_THROUGH commands when CK_COND=1
837  */
838  if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
839  ;
840  else if (!(req->cmd_flags & REQ_QUIET))
841  scsi_print_sense("", cmd);
842  result = 0;
843  /* BLOCK_PC may have set error */
844  error = 0;
845  }
846 
847  /*
848  * A number of bytes were successfully read. If there
849  * are leftovers and there is some kind of error
850  * (result != 0), retry the rest.
851  */
852  if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
853  return;
854 
855  error = __scsi_error_from_host_byte(cmd, result);
856 
857  if (host_byte(result) == DID_RESET) {
858  /* Third party bus reset or reset for error recovery
859  * reasons. Just retry the command and see what
860  * happens.
861  */
862  action = ACTION_RETRY;
863  } else if (sense_valid && !sense_deferred) {
864  switch (sshdr.sense_key) {
865  case UNIT_ATTENTION:
866  if (cmd->device->removable) {
867  /* Detected disc change. Set a bit
868  * and quietly refuse further access.
869  */
870  cmd->device->changed = 1;
871  description = "Media Changed";
872  action = ACTION_FAIL;
873  } else {
874  /* Must have been a power glitch, or a
875  * bus reset. Could not have been a
876  * media change, so we just retry the
877  * command and see what happens.
878  */
879  action = ACTION_RETRY;
880  }
881  break;
882  case ILLEGAL_REQUEST:
883  /* If we had an ILLEGAL REQUEST returned, then
884  * we may have performed an unsupported
885  * command. The only thing this should be
886  * would be a ten byte read where only a six
887  * byte read was supported. Also, on a system
888  * where READ CAPACITY failed, we may have
889  * read past the end of the disk.
890  */
891  if ((cmd->device->use_10_for_rw &&
892  sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
893  (cmd->cmnd[0] == READ_10 ||
894  cmd->cmnd[0] == WRITE_10)) {
895  /* This will issue a new 6-byte command. */
896  cmd->device->use_10_for_rw = 0;
897  action = ACTION_REPREP;
898  } else if (sshdr.asc == 0x10) /* DIX */ {
899  description = "Host Data Integrity Failure";
900  action = ACTION_FAIL;
901  error = -EILSEQ;
902  /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
903  } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
904  switch (cmd->cmnd[0]) {
905  case UNMAP:
906  description = "Discard failure";
907  break;
908  case WRITE_SAME:
909  case WRITE_SAME_16:
910  if (cmd->cmnd[1] & 0x8)
911  description = "Discard failure";
912  else
913  description =
914  "Write same failure";
915  break;
916  default:
917  description = "Invalid command failure";
918  break;
919  }
920  action = ACTION_FAIL;
921  error = -EREMOTEIO;
922  } else
923  action = ACTION_FAIL;
924  break;
925  case ABORTED_COMMAND:
926  action = ACTION_FAIL;
927  if (sshdr.asc == 0x10) { /* DIF */
928  description = "Target Data Integrity Failure";
929  error = -EILSEQ;
930  }
931  break;
932  case NOT_READY:
933  /* If the device is in the process of becoming
934  * ready, or has a temporary blockage, retry.
935  */
936  if (sshdr.asc == 0x04) {
937  switch (sshdr.ascq) {
938  case 0x01: /* becoming ready */
939  case 0x04: /* format in progress */
940  case 0x05: /* rebuild in progress */
941  case 0x06: /* recalculation in progress */
942  case 0x07: /* operation in progress */
943  case 0x08: /* Long write in progress */
944  case 0x09: /* self test in progress */
945  case 0x14: /* space allocation in progress */
946  action = ACTION_DELAYED_RETRY;
947  break;
948  default:
949  description = "Device not ready";
950  action = ACTION_FAIL;
951  break;
952  }
953  } else {
954  description = "Device not ready";
955  action = ACTION_FAIL;
956  }
957  break;
958  case VOLUME_OVERFLOW:
959  /* See SSC3rXX or current. */
960  action = ACTION_FAIL;
961  break;
962  default:
963  description = "Unhandled sense code";
964  action = ACTION_FAIL;
965  break;
966  }
967  } else {
968  description = "Unhandled error code";
969  action = ACTION_FAIL;
970  }
971 
972  switch (action) {
973  case ACTION_FAIL:
974  /* Give up and fail the remainder of the request */
975  scsi_release_buffers(cmd);
976  if (!(req->cmd_flags & REQ_QUIET)) {
977  if (description)
978  scmd_printk(KERN_INFO, cmd, "%s\n",
979  description);
980  scsi_print_result(cmd);
981  if (driver_byte(result) & DRIVER_SENSE)
982  scsi_print_sense("", cmd);
983  scsi_print_command(cmd);
984  }
985  if (blk_end_request_err(req, error))
986  scsi_requeue_command(q, cmd);
987  else
988  scsi_next_command(cmd);
989  break;
990  case ACTION_REPREP:
991  /* Unprep the request and put it back at the head of the queue.
992  * A new command will be prepared and issued.
993  */
994  scsi_release_buffers(cmd);
995  scsi_requeue_command(q, cmd);
996  break;
997  case ACTION_RETRY:
998  /* Retry the same command immediately */
999  __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1000  break;
1001  case ACTION_DELAYED_RETRY:
1002  /* Retry the same command after a delay */
1003  __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1004  break;
1005  }
1006 }
1007 
1008 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1009  gfp_t gfp_mask)
1010 {
1011  int count;
1012 
1013  /*
1014  * If sg table allocation fails, requeue request later.
1015  */
1016  if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1017  gfp_mask))) {
1018  return BLKPREP_DEFER;
1019  }
1020 
1021  req->buffer = NULL;
1022 
1023  /*
1024  * Next, walk the list, and fill in the addresses and sizes of
1025  * each segment.
1026  */
1027  count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1028  BUG_ON(count > sdb->table.nents);
1029  sdb->table.nents = count;
1030  sdb->length = blk_rq_bytes(req);
1031  return BLKPREP_OK;
1032 }
1033 
1034 /*
1035  * Function: scsi_init_io()
1036  *
1037  * Purpose: SCSI I/O initialize function.
1038  *
1039  * Arguments: cmd - Command descriptor we wish to initialize
1040  *
1041  * Returns: 0 on success
1042  * BLKPREP_DEFER if the failure is retryable
1043  * BLKPREP_KILL if the failure is fatal
1044  */
1045 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1046 {
1047  struct request *rq = cmd->request;
1048 
1049  int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
1050  if (error)
1051  goto err_exit;
1052 
1053  if (blk_bidi_rq(rq)) {
1054  struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1055  scsi_sdb_cache, GFP_ATOMIC);
1056  if (!bidi_sdb) {
1057  error = BLKPREP_DEFER;
1058  goto err_exit;
1059  }
1060 
1061  rq->next_rq->special = bidi_sdb;
1062  error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
1063  if (error)
1064  goto err_exit;
1065  }
1066 
1067  if (blk_integrity_rq(rq)) {
1068  struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1069  int ivecs, count;
1070 
1071  BUG_ON(prot_sdb == NULL);
1072  ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1073 
1074  if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1075  error = BLKPREP_DEFER;
1076  goto err_exit;
1077  }
1078 
1079  count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1080  prot_sdb->table.sgl);
1081  BUG_ON(unlikely(count > ivecs));
1082  BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1083 
1084  cmd->prot_sdb = prot_sdb;
1085  cmd->prot_sdb->table.nents = count;
1086  }
1087 
1088  return BLKPREP_OK;
1089 
1090 err_exit:
1091  scsi_release_buffers(cmd);
1092  cmd->request->special = NULL;
1093  scsi_put_command(cmd);
1094  return error;
1095 }
1097 
1098 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1099  struct request *req)
1100 {
1101  struct scsi_cmnd *cmd;
1102 
1103  if (!req->special) {
1104  cmd = scsi_get_command(sdev, GFP_ATOMIC);
1105  if (unlikely(!cmd))
1106  return NULL;
1107  req->special = cmd;
1108  } else {
1109  cmd = req->special;
1110  }
1111 
1112  /* pull a tag out of the request if we have one */
1113  cmd->tag = req->tag;
1114  cmd->request = req;
1115 
1116  cmd->cmnd = req->cmd;
1117  cmd->prot_op = SCSI_PROT_NORMAL;
1118 
1119  return cmd;
1120 }
1121 
1122 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1123 {
1124  struct scsi_cmnd *cmd;
1125  int ret = scsi_prep_state_check(sdev, req);
1126 
1127  if (ret != BLKPREP_OK)
1128  return ret;
1129 
1130  cmd = scsi_get_cmd_from_req(sdev, req);
1131  if (unlikely(!cmd))
1132  return BLKPREP_DEFER;
1133 
1134  /*
1135  * BLOCK_PC requests may transfer data, in which case they must have
1136  * a bio attached to them. Or they might contain a SCSI command
1137  * that does not transfer data, in which case they may optionally
1138  * submit a request without an attached bio.
1139  */
1140  if (req->bio) {
1141  int ret;
1142 
1143  BUG_ON(!req->nr_phys_segments);
1144 
1145  ret = scsi_init_io(cmd, GFP_ATOMIC);
1146  if (unlikely(ret))
1147  return ret;
1148  } else {
1149  BUG_ON(blk_rq_bytes(req));
1150 
1151  memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1152  req->buffer = NULL;
1153  }
1154 
1155  cmd->cmd_len = req->cmd_len;
1156  if (!blk_rq_bytes(req))
1157  cmd->sc_data_direction = DMA_NONE;
1158  else if (rq_data_dir(req) == WRITE)
1159  cmd->sc_data_direction = DMA_TO_DEVICE;
1160  else
1161  cmd->sc_data_direction = DMA_FROM_DEVICE;
1162 
1163  cmd->transfersize = blk_rq_bytes(req);
1164  cmd->allowed = req->retries;
1165  return BLKPREP_OK;
1166 }
1168 
1169 /*
1170  * Setup a REQ_TYPE_FS command. These are simple read/write request
1171  * from filesystems that still need to be translated to SCSI CDBs from
1172  * the ULD.
1173  */
1174 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1175 {
1176  struct scsi_cmnd *cmd;
1177  int ret = scsi_prep_state_check(sdev, req);
1178 
1179  if (ret != BLKPREP_OK)
1180  return ret;
1181 
1182  if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1183  && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1184  ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1185  if (ret != BLKPREP_OK)
1186  return ret;
1187  }
1188 
1189  /*
1190  * Filesystem requests must transfer data.
1191  */
1192  BUG_ON(!req->nr_phys_segments);
1193 
1194  cmd = scsi_get_cmd_from_req(sdev, req);
1195  if (unlikely(!cmd))
1196  return BLKPREP_DEFER;
1197 
1198  memset(cmd->cmnd, 0, BLK_MAX_CDB);
1199  return scsi_init_io(cmd, GFP_ATOMIC);
1200 }
1202 
1203 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1204 {
1205  int ret = BLKPREP_OK;
1206 
1207  /*
1208  * If the device is not in running state we will reject some
1209  * or all commands.
1210  */
1211  if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1212  switch (sdev->sdev_state) {
1213  case SDEV_OFFLINE:
1214  case SDEV_TRANSPORT_OFFLINE:
1215  /*
1216  * If the device is offline we refuse to process any
1217  * commands. The device must be brought online
1218  * before trying any recovery commands.
1219  */
1220  sdev_printk(KERN_ERR, sdev,
1221  "rejecting I/O to offline device\n");
1222  ret = BLKPREP_KILL;
1223  break;
1224  case SDEV_DEL:
1225  /*
1226  * If the device is fully deleted, we refuse to
1227  * process any commands as well.
1228  */
1229  sdev_printk(KERN_ERR, sdev,
1230  "rejecting I/O to dead device\n");
1231  ret = BLKPREP_KILL;
1232  break;
1233  case SDEV_QUIESCE:
1234  case SDEV_BLOCK:
1235  case SDEV_CREATED_BLOCK:
1236  /*
1237  * If the devices is blocked we defer normal commands.
1238  */
1239  if (!(req->cmd_flags & REQ_PREEMPT))
1240  ret = BLKPREP_DEFER;
1241  break;
1242  default:
1243  /*
1244  * For any other not fully online state we only allow
1245  * special commands. In particular any user initiated
1246  * command is not allowed.
1247  */
1248  if (!(req->cmd_flags & REQ_PREEMPT))
1249  ret = BLKPREP_KILL;
1250  break;
1251  }
1252  }
1253  return ret;
1254 }
1256 
1257 int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1258 {
1259  struct scsi_device *sdev = q->queuedata;
1260 
1261  switch (ret) {
1262  case BLKPREP_KILL:
1263  req->errors = DID_NO_CONNECT << 16;
1264  /* release the command and kill it */
1265  if (req->special) {
1266  struct scsi_cmnd *cmd = req->special;
1267  scsi_release_buffers(cmd);
1268  scsi_put_command(cmd);
1269  req->special = NULL;
1270  }
1271  break;
1272  case BLKPREP_DEFER:
1273  /*
1274  * If we defer, the blk_peek_request() returns NULL, but the
1275  * queue must be restarted, so we schedule a callback to happen
1276  * shortly.
1277  */
1278  if (sdev->device_busy == 0)
1279  blk_delay_queue(q, SCSI_QUEUE_DELAY);
1280  break;
1281  default:
1282  req->cmd_flags |= REQ_DONTPREP;
1283  }
1284 
1285  return ret;
1286 }
1288 
1289 int scsi_prep_fn(struct request_queue *q, struct request *req)
1290 {
1291  struct scsi_device *sdev = q->queuedata;
1292  int ret = BLKPREP_KILL;
1293 
1294  if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1295  ret = scsi_setup_blk_pc_cmnd(sdev, req);
1296  return scsi_prep_return(q, req, ret);
1297 }
1299 
1300 /*
1301  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1302  * return 0.
1303  *
1304  * Called with the queue_lock held.
1305  */
1306 static inline int scsi_dev_queue_ready(struct request_queue *q,
1307  struct scsi_device *sdev)
1308 {
1309  if (sdev->device_busy == 0 && sdev->device_blocked) {
1310  /*
1311  * unblock after device_blocked iterates to zero
1312  */
1313  if (--sdev->device_blocked == 0) {
1314  SCSI_LOG_MLQUEUE(3,
1315  sdev_printk(KERN_INFO, sdev,
1316  "unblocking device at zero depth\n"));
1317  } else {
1318  blk_delay_queue(q, SCSI_QUEUE_DELAY);
1319  return 0;
1320  }
1321  }
1322  if (scsi_device_is_busy(sdev))
1323  return 0;
1324 
1325  return 1;
1326 }
1327 
1328 
1329 /*
1330  * scsi_target_queue_ready: checks if we can send commands to the target
1331  * @sdev: scsi device on starget to check.
1332  *
1333  * Called with the host lock held.
1334  */
1335 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1336  struct scsi_device *sdev)
1337 {
1338  struct scsi_target *starget = scsi_target(sdev);
1339 
1340  if (starget->single_lun) {
1341  if (starget->starget_sdev_user &&
1342  starget->starget_sdev_user != sdev)
1343  return 0;
1344  starget->starget_sdev_user = sdev;
1345  }
1346 
1347  if (starget->target_busy == 0 && starget->target_blocked) {
1348  /*
1349  * unblock after target_blocked iterates to zero
1350  */
1351  if (--starget->target_blocked == 0) {
1352  SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1353  "unblocking target at zero depth\n"));
1354  } else
1355  return 0;
1356  }
1357 
1358  if (scsi_target_is_busy(starget)) {
1359  list_move_tail(&sdev->starved_entry, &shost->starved_list);
1360  return 0;
1361  }
1362 
1363  return 1;
1364 }
1365 
1366 /*
1367  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1368  * return 0. We must end up running the queue again whenever 0 is
1369  * returned, else IO can hang.
1370  *
1371  * Called with host_lock held.
1372  */
1373 static inline int scsi_host_queue_ready(struct request_queue *q,
1374  struct Scsi_Host *shost,
1375  struct scsi_device *sdev)
1376 {
1377  if (scsi_host_in_recovery(shost))
1378  return 0;
1379  if (shost->host_busy == 0 && shost->host_blocked) {
1380  /*
1381  * unblock after host_blocked iterates to zero
1382  */
1383  if (--shost->host_blocked == 0) {
1384  SCSI_LOG_MLQUEUE(3,
1385  printk("scsi%d unblocking host at zero depth\n",
1386  shost->host_no));
1387  } else {
1388  return 0;
1389  }
1390  }
1391  if (scsi_host_is_busy(shost)) {
1392  if (list_empty(&sdev->starved_entry))
1393  list_add_tail(&sdev->starved_entry, &shost->starved_list);
1394  return 0;
1395  }
1396 
1397  /* We're OK to process the command, so we can't be starved */
1398  if (!list_empty(&sdev->starved_entry))
1399  list_del_init(&sdev->starved_entry);
1400 
1401  return 1;
1402 }
1403 
1404 /*
1405  * Busy state exporting function for request stacking drivers.
1406  *
1407  * For efficiency, no lock is taken to check the busy state of
1408  * shost/starget/sdev, since the returned value is not guaranteed and
1409  * may be changed after request stacking drivers call the function,
1410  * regardless of taking lock or not.
1411  *
1412  * When scsi can't dispatch I/Os anymore and needs to kill I/Os, scsi
1413  * needs to return 'not busy'. Otherwise, request stacking drivers
1414  * may hold requests forever.
1415  */
1416 static int scsi_lld_busy(struct request_queue *q)
1417 {
1418  struct scsi_device *sdev = q->queuedata;
1419  struct Scsi_Host *shost;
1420 
1421  if (blk_queue_dead(q))
1422  return 0;
1423 
1424  shost = sdev->host;
1425 
1426  /*
1427  * Ignore host/starget busy state.
1428  * Since block layer does not have a concept of fairness across
1429  * multiple queues, congestion of host/starget needs to be handled
1430  * in SCSI layer.
1431  */
1432  if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1433  return 1;
1434 
1435  return 0;
1436 }
1437 
1438 /*
1439  * Kill a request for a dead device
1440  */
1441 static void scsi_kill_request(struct request *req, struct request_queue *q)
1442 {
1443  struct scsi_cmnd *cmd = req->special;
1444  struct scsi_device *sdev;
1445  struct scsi_target *starget;
1446  struct Scsi_Host *shost;
1447 
1448  blk_start_request(req);
1449 
1450  scmd_printk(KERN_INFO, cmd, "killing request\n");
1451 
1452  sdev = cmd->device;
1453  starget = scsi_target(sdev);
1454  shost = sdev->host;
1455  scsi_init_cmd_errh(cmd);
1456  cmd->result = DID_NO_CONNECT << 16;
1457  atomic_inc(&cmd->device->iorequest_cnt);
1458 
1459  /*
1460  * SCSI request completion path will do scsi_device_unbusy(),
1461  * bump busy counts. To bump the counters, we need to dance
1462  * with the locks as normal issue path does.
1463  */
1464  sdev->device_busy++;
1465  spin_unlock(sdev->request_queue->queue_lock);
1466  spin_lock(shost->host_lock);
1467  shost->host_busy++;
1468  starget->target_busy++;
1469  spin_unlock(shost->host_lock);
1470  spin_lock(sdev->request_queue->queue_lock);
1471 
1472  blk_complete_request(req);
1473 }
1474 
1475 static void scsi_softirq_done(struct request *rq)
1476 {
1477  struct scsi_cmnd *cmd = rq->special;
1478  unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1479  int disposition;
1480 
1481  INIT_LIST_HEAD(&cmd->eh_entry);
1482 
1483  atomic_inc(&cmd->device->iodone_cnt);
1484  if (cmd->result)
1485  atomic_inc(&cmd->device->ioerr_cnt);
1486 
1487  disposition = scsi_decide_disposition(cmd);
1488  if (disposition != SUCCESS &&
1489  time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1490  sdev_printk(KERN_ERR, cmd->device,
1491  "timing out command, waited %lus\n",
1492  wait_for/HZ);
1493  disposition = SUCCESS;
1494  }
1495 
1496  scsi_log_completion(cmd, disposition);
1497 
1498  switch (disposition) {
1499  case SUCCESS:
1500  scsi_finish_command(cmd);
1501  break;
1502  case NEEDS_RETRY:
1503  scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1504  break;
1505  case ADD_TO_MLQUEUE:
1506  scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1507  break;
1508  default:
1509  if (!scsi_eh_scmd_add(cmd, 0))
1510  scsi_finish_command(cmd);
1511  }
1512 }
1513 
1514 /*
1515  * Function: scsi_request_fn()
1516  *
1517  * Purpose: Main strategy routine for SCSI.
1518  *
1519  * Arguments: q - Pointer to actual queue.
1520  *
1521  * Returns: Nothing
1522  *
1523  * Lock status: IO request lock assumed to be held when called.
1524  */
1525 static void scsi_request_fn(struct request_queue *q)
1526 {
1527  struct scsi_device *sdev = q->queuedata;
1528  struct Scsi_Host *shost;
1529  struct scsi_cmnd *cmd;
1530  struct request *req;
1531 
1532  if(!get_device(&sdev->sdev_gendev))
1533  /* We must be tearing the block queue down already */
1534  return;
1535 
1536  /*
1537  * To start with, we keep looping until the queue is empty, or until
1538  * the host is no longer able to accept any more requests.
1539  */
1540  shost = sdev->host;
1541  for (;;) {
1542  int rtn;
1543  /*
1544  * get next queueable request. We do this early to make sure
1545  * that the request is fully prepared even if we cannot
1546  * accept it.
1547  */
1548  req = blk_peek_request(q);
1549  if (!req || !scsi_dev_queue_ready(q, sdev))
1550  break;
1551 
1552  if (unlikely(!scsi_device_online(sdev))) {
1553  sdev_printk(KERN_ERR, sdev,
1554  "rejecting I/O to offline device\n");
1555  scsi_kill_request(req, q);
1556  continue;
1557  }
1558 
1559 
1560  /*
1561  * Remove the request from the request list.
1562  */
1563  if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1564  blk_start_request(req);
1565  sdev->device_busy++;
1566 
1567  spin_unlock(q->queue_lock);
1568  cmd = req->special;
1569  if (unlikely(cmd == NULL)) {
1570  printk(KERN_CRIT "impossible request in %s.\n"
1571  "please mail a stack trace to "
1572  "[email protected]\n",
1573  __func__);
1574  blk_dump_rq_flags(req, "foo");
1575  BUG();
1576  }
1577  spin_lock(shost->host_lock);
1578 
1579  /*
1580  * We hit this when the driver is using a host wide
1581  * tag map. For device level tag maps the queue_depth check
1582  * in the device ready fn would prevent us from trying
1583  * to allocate a tag. Since the map is a shared host resource
1584  * we add the dev to the starved list so it eventually gets
1585  * a run when a tag is freed.
1586  */
1587  if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1588  if (list_empty(&sdev->starved_entry))
1589  list_add_tail(&sdev->starved_entry,
1590  &shost->starved_list);
1591  goto not_ready;
1592  }
1593 
1594  if (!scsi_target_queue_ready(shost, sdev))
1595  goto not_ready;
1596 
1597  if (!scsi_host_queue_ready(q, shost, sdev))
1598  goto not_ready;
1599 
1600  scsi_target(sdev)->target_busy++;
1601  shost->host_busy++;
1602 
1603  /*
1604  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1605  * take the lock again.
1606  */
1607  spin_unlock_irq(shost->host_lock);
1608 
1609  /*
1610  * Finally, initialize any error handling parameters, and set up
1611  * the timers for timeouts.
1612  */
1613  scsi_init_cmd_errh(cmd);
1614 
1615  /*
1616  * Dispatch the command to the low-level driver.
1617  */
1618  rtn = scsi_dispatch_cmd(cmd);
1619  spin_lock_irq(q->queue_lock);
1620  if (rtn)
1621  goto out_delay;
1622  }
1623 
1624  goto out;
1625 
1626  not_ready:
1627  spin_unlock_irq(shost->host_lock);
1628 
1629  /*
1630  * lock q, handle tag, requeue req, and decrement device_busy. We
1631  * must return with queue_lock held.
1632  *
1633  * Decrementing device_busy without checking it is OK, as all such
1634  * cases (host limits or settings) should run the queue at some
1635  * later time.
1636  */
1637  spin_lock_irq(q->queue_lock);
1638  blk_requeue_request(q, req);
1639  sdev->device_busy--;
1640 out_delay:
1641  if (sdev->device_busy == 0)
1642  blk_delay_queue(q, SCSI_QUEUE_DELAY);
1643 out:
1644  /* must be careful here...if we trigger the ->remove() function
1645  * we cannot be holding the q lock */
1646  spin_unlock_irq(q->queue_lock);
1647  put_device(&sdev->sdev_gendev);
1648  spin_lock_irq(q->queue_lock);
1649 }
1650 
1651 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1652 {
1653  struct device *host_dev;
1654  u64 bounce_limit = 0xffffffff;
1655 
1656  if (shost->unchecked_isa_dma)
1657  return BLK_BOUNCE_ISA;
1658  /*
1659  * Platforms with virtual-DMA translation
1660  * hardware have no practical limit.
1661  */
1662  if (!PCI_DMA_BUS_IS_PHYS)
1663  return BLK_BOUNCE_ANY;
1664 
1665  host_dev = scsi_get_device(shost);
1666  if (host_dev && host_dev->dma_mask)
1667  bounce_limit = *host_dev->dma_mask;
1668 
1669  return bounce_limit;
1670 }
1671 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1672 
1673 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1674  request_fn_proc *request_fn)
1675 {
1676  struct request_queue *q;
1677  struct device *dev = shost->dma_dev;
1678 
1679  q = blk_init_queue(request_fn, NULL);
1680  if (!q)
1681  return NULL;
1682 
1683  /*
1684  * this limit is imposed by hardware restrictions
1685  */
1686  blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1687  SCSI_MAX_SG_CHAIN_SEGMENTS));
1688 
1689  if (scsi_host_prot_dma(shost)) {
1690  shost->sg_prot_tablesize =
1691  min_not_zero(shost->sg_prot_tablesize,
1692  (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1693  BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1694  blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1695  }
1696 
1697  blk_queue_max_hw_sectors(q, shost->max_sectors);
1698  blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1699  blk_queue_segment_boundary(q, shost->dma_boundary);
1700  dma_set_seg_boundary(dev, shost->dma_boundary);
1701 
1702  blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1703 
1704  if (!shost->use_clustering)
1705  q->limits.cluster = 0;
1706 
1707  /*
1708  * set a reasonable default alignment on word boundaries: the
1709  * host and device may alter it using
1710  * blk_queue_update_dma_alignment() later.
1711  */
1712  blk_queue_dma_alignment(q, 0x03);
1713 
1714  return q;
1715 }
1716 EXPORT_SYMBOL(__scsi_alloc_queue);
1717 
1718 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1719 {
1720  struct request_queue *q;
1721 
1722  q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1723  if (!q)
1724  return NULL;
1725 
1726  blk_queue_prep_rq(q, scsi_prep_fn);
1727  blk_queue_softirq_done(q, scsi_softirq_done);
1728  blk_queue_rq_timed_out(q, scsi_times_out);
1729  blk_queue_lld_busy(q, scsi_lld_busy);
1730  return q;
1731 }
1732 
1733 /*
1734  * Function: scsi_block_requests()
1735  *
1736  * Purpose: Utility function used by low-level drivers to prevent further
1737  * commands from being queued to the device.
1738  *
1739  * Arguments: shost - Host in question
1740  *
1741  * Returns: Nothing
1742  *
1743  * Lock status: No locks are assumed held.
1744  *
1745  * Notes: There is no timer nor any other means by which the requests
1746  * get unblocked other than the low-level driver calling
1747  * scsi_unblock_requests().
1748  */
1749 void scsi_block_requests(struct Scsi_Host *shost)
1750 {
1751  shost->host_self_blocked = 1;
1752 }
1753 EXPORT_SYMBOL(scsi_block_requests);
1754 
1755 /*
1756  * Function: scsi_unblock_requests()
1757  *
1758  * Purpose: Utility function used by low-level drivers to allow further
1759  * commands to be queued to the device.
1760  *
1761  * Arguments: shost - Host in question
1762  *
1763  * Returns: Nothing
1764  *
1765  * Lock status: No locks are assumed held.
1766  *
1767  * Notes: There is no timer nor any other means by which the requests
1768  * get unblocked other than the low-level driver calling
1769  * scsi_unblock_requests().
1770  *
1771  * This is done as an API function so that changes to the
1772  * internals of the scsi mid-layer won't require wholesale
1773  * changes to drivers that use this feature.
1774  */
1775 void scsi_unblock_requests(struct Scsi_Host *shost)
1776 {
1777  shost->host_self_blocked = 0;
1778  scsi_run_host_queues(shost);
1779 }
1780 EXPORT_SYMBOL(scsi_unblock_requests);
1781 
1782 int __init scsi_init_queue(void)
1783 {
1784  int i;
1785 
1786  scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1787  sizeof(struct scsi_data_buffer),
1788  0, 0, NULL);
1789  if (!scsi_sdb_cache) {
1790  printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1791  return -ENOMEM;
1792  }
1793 
1794  for (i = 0; i < SG_MEMPOOL_NR; i++) {
1795  struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1796  int size = sgp->size * sizeof(struct scatterlist);
1797 
1798  sgp->slab = kmem_cache_create(sgp->name, size, 0,
1799  SLAB_HWCACHE_ALIGN, NULL);
1800  if (!sgp->slab) {
1801  printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1802  sgp->name);
1803  goto cleanup_sdb;
1804  }
1805 
1806  sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1807  sgp->slab);
1808  if (!sgp->pool) {
1809  printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1810  sgp->name);
1811  goto cleanup_sdb;
1812  }
1813  }
1814 
1815  return 0;
1816 
1817 cleanup_sdb:
1818  for (i = 0; i < SG_MEMPOOL_NR; i++) {
1819  struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1820  if (sgp->pool)
1821  mempool_destroy(sgp->pool);
1822  if (sgp->slab)
1823  kmem_cache_destroy(sgp->slab);
1824  }
1825  kmem_cache_destroy(scsi_sdb_cache);
1826 
1827  return -ENOMEM;
1828 }
1829 
1830 void scsi_exit_queue(void)
1831 {
1832  int i;
1833 
1834  kmem_cache_destroy(scsi_sdb_cache);
1835 
1836  for (i = 0; i < SG_MEMPOOL_NR; i++) {
1837  struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1838  mempool_destroy(sgp->pool);
1839  kmem_cache_destroy(sgp->slab);
1840  }
1841 }
1842 
1861 int
1862 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1863  unsigned char *buffer, int len, int timeout, int retries,
1864  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1865 {
1866  unsigned char cmd[10];
1867  unsigned char *real_buffer;
1868  int ret;
1869 
1870  memset(cmd, 0, sizeof(cmd));
1871  cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1872 
1873  if (sdev->use_10_for_ms) {
1874  if (len > 65535)
1875  return -EINVAL;
1876  real_buffer = kmalloc(8 + len, GFP_KERNEL);
1877  if (!real_buffer)
1878  return -ENOMEM;
1879  memcpy(real_buffer + 8, buffer, len);
1880  len += 8;
1881  real_buffer[0] = 0;
1882  real_buffer[1] = 0;
1883  real_buffer[2] = data->medium_type;
1884  real_buffer[3] = data->device_specific;
1885  real_buffer[4] = data->longlba ? 0x01 : 0;
1886  real_buffer[5] = 0;
1887  real_buffer[6] = data->block_descriptor_length >> 8;
1888  real_buffer[7] = data->block_descriptor_length;
1889 
1890  cmd[0] = MODE_SELECT_10;
1891  cmd[7] = len >> 8;
1892  cmd[8] = len;
1893  } else {
1894  if (len > 255 || data->block_descriptor_length > 255 ||
1895  data->longlba)
1896  return -EINVAL;
1897 
1898  real_buffer = kmalloc(4 + len, GFP_KERNEL);
1899  if (!real_buffer)
1900  return -ENOMEM;
1901  memcpy(real_buffer + 4, buffer, len);
1902  len += 4;
1903  real_buffer[0] = 0;
1904  real_buffer[1] = data->medium_type;
1905  real_buffer[2] = data->device_specific;
1906  real_buffer[3] = data->block_descriptor_length;
1907 
1908 
1909  cmd[0] = MODE_SELECT;
1910  cmd[4] = len;
1911  }
1912 
1913  ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1914  sshdr, timeout, retries, NULL);
1915  kfree(real_buffer);
1916  return ret;
1917 }
1919 
1937 int
1938 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1939  unsigned char *buffer, int len, int timeout, int retries,
1940  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1941 {
1942  unsigned char cmd[12];
1943  int use_10_for_ms;
1944  int header_length;
1945  int result;
1946  struct scsi_sense_hdr my_sshdr;
1947 
1948  memset(data, 0, sizeof(*data));
1949  memset(&cmd[0], 0, 12);
1950  cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1951  cmd[2] = modepage;
1952 
1953  /* caller might not be interested in sense, but we need it */
1954  if (!sshdr)
1955  sshdr = &my_sshdr;
1956 
1957  retry:
1958  use_10_for_ms = sdev->use_10_for_ms;
1959 
1960  if (use_10_for_ms) {
1961  if (len < 8)
1962  len = 8;
1963 
1964  cmd[0] = MODE_SENSE_10;
1965  cmd[8] = len;
1966  header_length = 8;
1967  } else {
1968  if (len < 4)
1969  len = 4;
1970 
1971  cmd[0] = MODE_SENSE;
1972  cmd[4] = len;
1973  header_length = 4;
1974  }
1975 
1976  memset(buffer, 0, len);
1977 
1978  result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1979  sshdr, timeout, retries, NULL);
1980 
1981  /* This code looks awful: what it's doing is making sure an
1982  * ILLEGAL REQUEST sense return identifies the actual command
1983  * byte as the problem. MODE_SENSE commands can return
1984  * ILLEGAL REQUEST if the code page isn't supported */
1985 
1986  if (use_10_for_ms && !scsi_status_is_good(result) &&
1987  (driver_byte(result) & DRIVER_SENSE)) {
1988  if (scsi_sense_valid(sshdr)) {
1989  if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1990  (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1991  /*
1992  * Invalid command operation code
1993  */
1994  sdev->use_10_for_ms = 0;
1995  goto retry;
1996  }
1997  }
1998  }
1999 
2000  if(scsi_status_is_good(result)) {
2001  if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2002  (modepage == 6 || modepage == 8))) {
2003  /* Initio breakage? */
2004  header_length = 0;
2005  data->length = 13;
2006  data->medium_type = 0;
2007  data->device_specific = 0;
2008  data->longlba = 0;
2009  data->block_descriptor_length = 0;
2010  } else if(use_10_for_ms) {
2011  data->length = buffer[0]*256 + buffer[1] + 2;
2012  data->medium_type = buffer[2];
2013  data->device_specific = buffer[3];
2014  data->longlba = buffer[4] & 0x01;
2015  data->block_descriptor_length = buffer[6]*256
2016  + buffer[7];
2017  } else {
2018  data->length = buffer[0] + 1;
2019  data->medium_type = buffer[1];
2020  data->device_specific = buffer[2];
2021  data->block_descriptor_length = buffer[3];
2022  }
2023  data->header_length = header_length;
2024  }
2025 
2026  return result;
2027 }
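/*
 * Illustrative sketch (not part of the original file): a ULD such as sd
 * reads the caching mode page (0x08) roughly like this; the buffer size,
 * timeout and retry count are illustrative and parse_caching_page() is a
 * hypothetical helper:
 *
 *	unsigned char buffer[128];
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *			      30 * HZ, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res))
 *		parse_caching_page(buffer + data.header_length +
 *				   data.block_descriptor_length);
 */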
2029 
2042 int
2043 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2044  struct scsi_sense_hdr *sshdr_external)
2045 {
2046  char cmd[] = {
2047  TEST_UNIT_READY, 0, 0, 0, 0, 0,
2048  };
2049  struct scsi_sense_hdr *sshdr;
2050  int result;
2051 
2052  if (!sshdr_external)
2053  sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2054  else
2055  sshdr = sshdr_external;
2056 
2057  /* try to eat the UNIT_ATTENTION if there are enough retries */
2058  do {
2059  result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2060  timeout, retries, NULL);
2061  if (sdev->removable && scsi_sense_valid(sshdr) &&
2062  sshdr->sense_key == UNIT_ATTENTION)
2063  sdev->changed = 1;
2064  } while (scsi_sense_valid(sshdr) &&
2065  sshdr->sense_key == UNIT_ATTENTION && --retries);
2066 
2067  if (!sshdr_external)
2068  kfree(sshdr);
2069  return result;
2070 }
2072 
2081 int
2082 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2083 {
2084  enum scsi_device_state oldstate = sdev->sdev_state;
2085 
2086  if (state == oldstate)
2087  return 0;
2088 
2089  switch (state) {
2090  case SDEV_CREATED:
2091  switch (oldstate) {
2092  case SDEV_CREATED_BLOCK:
2093  break;
2094  default:
2095  goto illegal;
2096  }
2097  break;
2098 
2099  case SDEV_RUNNING:
2100  switch (oldstate) {
2101  case SDEV_CREATED:
2102  case SDEV_OFFLINE:
2103  case SDEV_TRANSPORT_OFFLINE:
2104  case SDEV_QUIESCE:
2105  case SDEV_BLOCK:
2106  break;
2107  default:
2108  goto illegal;
2109  }
2110  break;
2111 
2112  case SDEV_QUIESCE:
2113  switch (oldstate) {
2114  case SDEV_RUNNING:
2115  case SDEV_OFFLINE:
2116  case SDEV_TRANSPORT_OFFLINE:
2117  break;
2118  default:
2119  goto illegal;
2120  }
2121  break;
2122 
2123  case SDEV_OFFLINE:
2124  case SDEV_TRANSPORT_OFFLINE:
2125  switch (oldstate) {
2126  case SDEV_CREATED:
2127  case SDEV_RUNNING:
2128  case SDEV_QUIESCE:
2129  case SDEV_BLOCK:
2130  break;
2131  default:
2132  goto illegal;
2133  }
2134  break;
2135 
2136  case SDEV_BLOCK:
2137  switch (oldstate) {
2138  case SDEV_RUNNING:
2139  case SDEV_CREATED_BLOCK:
2140  break;
2141  default:
2142  goto illegal;
2143  }
2144  break;
2145 
2146  case SDEV_CREATED_BLOCK:
2147  switch (oldstate) {
2148  case SDEV_CREATED:
2149  break;
2150  default:
2151  goto illegal;
2152  }
2153  break;
2154 
2155  case SDEV_CANCEL:
2156  switch (oldstate) {
2157  case SDEV_CREATED:
2158  case SDEV_RUNNING:
2159  case SDEV_QUIESCE:
2160  case SDEV_OFFLINE:
2161  case SDEV_TRANSPORT_OFFLINE:
2162  case SDEV_BLOCK:
2163  break;
2164  default:
2165  goto illegal;
2166  }
2167  break;
2168 
2169  case SDEV_DEL:
2170  switch (oldstate) {
2171  case SDEV_CREATED:
2172  case SDEV_RUNNING:
2173  case SDEV_OFFLINE:
2174  case SDEV_TRANSPORT_OFFLINE:
2175  case SDEV_CANCEL:
2176  break;
2177  default:
2178  goto illegal;
2179  }
2180  break;
2181 
2182  }
2183  sdev->sdev_state = state;
2184  return 0;
2185 
2186  illegal:
2187  SCSI_LOG_ERROR_RECOVERY(1,
2188  sdev_printk(KERN_ERR, sdev,
2189  "Illegal state transition %s->%s\n",
2190  scsi_device_state_name(oldstate),
2191  scsi_device_state_name(state))
2192  );
2193  return -EINVAL;
2194 }
2195 EXPORT_SYMBOL(scsi_device_set_state);
2196 
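/*
 * Illustrative sketch (not part of scsi_lib.c): callers must check the
 * return value, because scsi_device_set_state() rejects transitions that
 * are not in the tables above (for example, nothing can leave SDEV_DEL).
 * The warning text below is an assumption for the example.
 */
static void example_offline_device(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "could not transition to SDEV_OFFLINE\n");
}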
2204 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2205 {
2206  int idx = 0;
2207  char *envp[3];
2208 
2209  switch (evt->evt_type) {
2210  case SDEV_EVT_MEDIA_CHANGE:
2211  envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2212  break;
2213 
2214  default:
2215  /* do nothing */
2216  break;
2217  }
2218 
2219  envp[idx++] = NULL;
2220 
2221  kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2222 }
2223 
2231 void scsi_evt_thread(struct work_struct *work)
2232 {
2233  struct scsi_device *sdev;
2234  LIST_HEAD(event_list);
2235 
2236  sdev = container_of(work, struct scsi_device, event_work);
2237 
2238  while (1) {
2239  struct scsi_event *evt;
2240  struct list_head *this, *tmp;
2241  unsigned long flags;
2242 
2243  spin_lock_irqsave(&sdev->list_lock, flags);
2244  list_splice_init(&sdev->event_list, &event_list);
2245  spin_unlock_irqrestore(&sdev->list_lock, flags);
2246 
2247  if (list_empty(&event_list))
2248  break;
2249 
2250  list_for_each_safe(this, tmp, &event_list) {
2251  evt = list_entry(this, struct scsi_event, node);
2252  list_del(&evt->node);
2253  scsi_evt_emit(sdev, evt);
2254  kfree(evt);
2255  }
2256  }
2257 }
2258 
2266 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2267 {
2268  unsigned long flags;
2269 
2270 #if 0
2271  /* FIXME: currently this check eliminates all media change events
2272  * for polled devices. Need to update to discriminate between AN
2273  * and polled events */
2274  if (!test_bit(evt->evt_type, sdev->supported_events)) {
2275  kfree(evt);
2276  return;
2277  }
2278 #endif
2279 
2280  spin_lock_irqsave(&sdev->list_lock, flags);
2281  list_add_tail(&evt->node, &sdev->event_list);
2282  schedule_work(&sdev->event_work);
2283  spin_unlock_irqrestore(&sdev->list_lock, flags);
2284 }
2285 EXPORT_SYMBOL_GPL(sdev_evt_send);
2286 
2294 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2295  gfp_t gfpflags)
2296 {
2297  struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2298  if (!evt)
2299  return NULL;
2300 
2301  evt->evt_type = evt_type;
2302  INIT_LIST_HEAD(&evt->node);
2303 
2304  /* evt_type-specific initialization, if any */
2305  switch (evt_type) {
2306  case SDEV_EVT_MEDIA_CHANGE:
2307  default:
2308  /* do nothing */
2309  break;
2310  }
2311 
2312  return evt;
2313 }
2314 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2315 
2324 void sdev_evt_send_simple(struct scsi_device *sdev,
2325  enum scsi_device_event evt_type, gfp_t gfpflags)
2326 {
2327  struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2328  if (!evt) {
2329  sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2330  evt_type);
2331  return;
2332  }
2333 
2334  sdev_evt_send(sdev, evt);
2335 }
2336 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2337 
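/*
 * Illustrative sketch (not part of scsi_lib.c): a driver that detects new
 * media can report it with a single call; the event is queued on
 * sdev->event_list and emitted as a uevent by scsi_evt_thread().
 * GFP_KERNEL assumes process context.
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}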
2353 int
2354 scsi_device_quiesce(struct scsi_device *sdev)
2355 {
2356  int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2357  if (err)
2358  return err;
2359 
2360  scsi_run_queue(sdev->request_queue);
2361  while (sdev->device_busy) {
2362  msleep_interruptible(200);
2363  scsi_run_queue(sdev->request_queue);
2364  }
2365  return 0;
2366 }
2367 EXPORT_SYMBOL(scsi_device_quiesce);
2368 
2378 void scsi_device_resume(struct scsi_device *sdev)
2379 {
2380  /* check if the device state was mutated prior to resume, and if
2381  * so assume the state is being managed elsewhere (for example
2382  * device deleted during suspend)
2383  */
2384  if (sdev->sdev_state != SDEV_QUIESCE ||
2385  scsi_device_set_state(sdev, SDEV_RUNNING))
2386  return;
2387  scsi_run_queue(sdev->request_queue);
2388 }
2389 EXPORT_SYMBOL(scsi_device_resume);
2390 
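/*
 * Illustrative sketch (not part of scsi_lib.c): the usual quiesce/resume
 * pairing used by power management. scsi_device_quiesce() waits for
 * outstanding commands to drain, so it must not be called from atomic
 * context; the low-power step in the middle is a placeholder.
 */
static int example_suspend(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	/* ... place the device or transport into a low-power state ... */
	return 0;
}

static void example_resume(struct scsi_device *sdev)
{
	/* ... wake the device ... */
	scsi_device_resume(sdev);
}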
2391 static void
2392 device_quiesce_fn(struct scsi_device *sdev, void *data)
2393 {
2394  scsi_device_quiesce(sdev);
2395 }
2396 
2397 void
2398 scsi_target_quiesce(struct scsi_target *starget)
2399 {
2400  starget_for_each_device(starget, NULL, device_quiesce_fn);
2401 }
2402 EXPORT_SYMBOL(scsi_target_quiesce);
2403 
2404 static void
2405 device_resume_fn(struct scsi_device *sdev, void *data)
2406 {
2407  scsi_device_resume(sdev);
2408 }
2409 
2410 void
2411 scsi_target_resume(struct scsi_target *starget)
2412 {
2413  starget_for_each_device(starget, NULL, device_resume_fn);
2414 }
2415 EXPORT_SYMBOL(scsi_target_resume);
2416 
2433 int
2434 scsi_internal_device_block(struct scsi_device *sdev)
2435 {
2436  struct request_queue *q = sdev->request_queue;
2437  unsigned long flags;
2438  int err = 0;
2439 
2440  err = scsi_device_set_state(sdev, SDEV_BLOCK);
2441  if (err) {
2442  err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2443 
2444  if (err)
2445  return err;
2446  }
2447 
2448  /*
2449  * The device has transitioned to SDEV_BLOCK. Stop the
2450  * block layer from calling the midlayer with this device's
2451  * request queue.
2452  */
2453  spin_lock_irqsave(q->queue_lock, flags);
2454  blk_stop_queue(q);
2455  spin_unlock_irqrestore(q->queue_lock, flags);
2456 
2457  return 0;
2458 }
2459 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2460 
2477 int
2478 scsi_internal_device_unblock(struct scsi_device *sdev,
2479  enum scsi_device_state new_state)
2480 {
2481  struct request_queue *q = sdev->request_queue;
2482  unsigned long flags;
2483 
2484  /*
2485  * Try to transition the scsi device to SDEV_RUNNING or one of the
2486  * offlined states and goose the device queue if successful.
2487  */
2488  if ((sdev->sdev_state == SDEV_BLOCK) ||
2489  (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
2490  sdev->sdev_state = new_state;
2491  else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2492  if (new_state == SDEV_TRANSPORT_OFFLINE ||
2493  new_state == SDEV_OFFLINE)
2494  sdev->sdev_state = new_state;
2495  else
2496  sdev->sdev_state = SDEV_CREATED;
2497  } else if (sdev->sdev_state != SDEV_CANCEL &&
2498  sdev->sdev_state != SDEV_OFFLINE)
2499  return -EINVAL;
2500 
2501  spin_lock_irqsave(q->queue_lock, flags);
2502  blk_start_queue(q);
2503  spin_unlock_irqrestore(q->queue_lock, flags);
2504 
2505  return 0;
2506 }
2507 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2508 
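/*
 * Illustrative sketch (not part of scsi_lib.c): pairing block/unblock
 * around low-level recovery so the midlayer queues no new commands while
 * the device is unreachable. SDEV_RUNNING restores normal operation;
 * SDEV_TRANSPORT_OFFLINE could be passed instead if the link stays down.
 */
static void example_recover_device(struct scsi_device *sdev)
{
	if (scsi_internal_device_block(sdev))
		return;
	/* ... perform transport-level recovery ... */
	scsi_internal_device_unblock(sdev, SDEV_RUNNING);
}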
2509 static void
2510 device_block(struct scsi_device *sdev, void *data)
2511 {
2512  scsi_internal_device_block(sdev);
2513 }
2514 
2515 static int
2516 target_block(struct device *dev, void *data)
2517 {
2518  if (scsi_is_target_device(dev))
2519  starget_for_each_device(to_scsi_target(dev), NULL,
2520  device_block);
2521  return 0;
2522 }
2523 
2524 void
2525 scsi_target_block(struct device *dev)
2526 {
2527  if (scsi_is_target_device(dev))
2528  starget_for_each_device(to_scsi_target(dev), NULL,
2529  device_block);
2530  else
2531  device_for_each_child(dev, NULL, target_block);
2532 }
2533 EXPORT_SYMBOL_GPL(scsi_target_block);
2534 
2535 static void
2536 device_unblock(struct scsi_device *sdev, void *data)
2537 {
2538  scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2539 }
2540 
2541 static int
2542 target_unblock(struct device *dev, void *data)
2543 {
2544  if (scsi_is_target_device(dev))
2545  starget_for_each_device(to_scsi_target(dev), data,
2546  device_unblock);
2547  return 0;
2548 }
2549 
2550 void
2551 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2552 {
2553  if (scsi_is_target_device(dev))
2554  starget_for_each_device(to_scsi_target(dev), &new_state,
2555  device_unblock);
2556  else
2557  device_for_each_child(dev, &new_state, target_unblock);
2558 }
2559 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2560 
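/*
 * Illustrative sketch (not part of scsi_lib.c): transport classes (for
 * example FC dev_loss handling) block a whole target while a remote port
 * is gone and unblock it when the port returns. Passing the target's
 * embedded device is one option, since scsi_target_block()/
 * scsi_target_unblock() also accept a parent and walk its children.
 */
static void example_target_recovery(struct scsi_target *starget)
{
	scsi_target_block(&starget->dev);
	/* ... wait for the remote port to come back ... */
	scsi_target_unblock(&starget->dev, SDEV_RUNNING);
}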
2570 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2571  size_t *offset, size_t *len)
2572 {
2573  int i;
2574  size_t sg_len = 0, len_complete = 0;
2575  struct scatterlist *sg;
2576  struct page *page;
2577 
2578  WARN_ON(!irqs_disabled());
2579 
2580  for_each_sg(sgl, sg, sg_count, i) {
2581  len_complete = sg_len; /* Complete sg-entries */
2582  sg_len += sg->length;
2583  if (sg_len > *offset)
2584  break;
2585  }
2586 
2587  if (unlikely(i == sg_count)) {
2588  printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2589  "elements %d\n",
2590  __func__, sg_len, *offset, sg_count);
2591  WARN_ON(1);
2592  return NULL;
2593  }
2594 
2595  /* Offset starting from the beginning of first page in this sg-entry */
2596  *offset = *offset - len_complete + sg->offset;
2597 
2598  /* Assumption: contiguous pages can be accessed as "page + i" */
2599  page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2600  *offset &= ~PAGE_MASK;
2601 
2602  /* Bytes in this sg-entry from *offset to the end of the page */
2603  sg_len = PAGE_SIZE - *offset;
2604  if (*len > sg_len)
2605  *len = sg_len;
2606 
2607  return kmap_atomic(page);
2608 }
2609 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2610 
2615 void scsi_kunmap_atomic_sg(void *virt)
2616 {
2617  kunmap_atomic(virt);
2618 }
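/*
 * Illustrative sketch (not part of scsi_lib.c): copying bytes out of a
 * scatter-gather list. Each mapping covers at most the remainder of one
 * page, so the copy loops; the caller must have interrupts disabled, as
 * scsi_kmap_atomic_sg() warns otherwise. Function name and parameters are
 * assumptions for the example.
 */
static void example_copy_from_sg(struct scatterlist *sgl, int sg_count,
				 size_t offset, char *dst, size_t count)
{
	while (count) {
		size_t off = offset, len = count;
		char *vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &off, &len);

		if (!vaddr)
			break;
		/* off is now the offset within the mapped page, len is clamped */
		memcpy(dst, vaddr + off, len);
		scsi_kunmap_atomic_sg(vaddr);
		dst += len;
		offset += len;
		count -= len;
	}
}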