Linux Kernel 3.7.1
scsi_sysfs.c
1 /*
2  * scsi_sysfs.c
3  *
4  * SCSI sysfs interface routines.
5  *
6  * Created to pull SCSI mid layer sysfs routines into one file.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/blkdev.h>
13 #include <linux/device.h>
14 #include <linux/pm_runtime.h>
15 
16 #include <scsi/scsi.h>
17 #include <scsi/scsi_device.h>
18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi_transport.h>
21 #include <scsi/scsi_driver.h>
22 
23 #include "scsi_priv.h"
24 #include "scsi_logging.h"
25 
26 static struct device_type scsi_dev_type;
27 
28 static const struct {
29  enum scsi_device_state value;
30  char *name;
31 } sdev_states[] = {
32  { SDEV_CREATED, "created" },
33  { SDEV_RUNNING, "running" },
34  { SDEV_CANCEL, "cancel" },
35  { SDEV_DEL, "deleted" },
36  { SDEV_QUIESCE, "quiesce" },
37  { SDEV_OFFLINE, "offline" },
38  { SDEV_TRANSPORT_OFFLINE, "transport-offline" },
39  { SDEV_BLOCK, "blocked" },
40  { SDEV_CREATED_BLOCK, "created-blocked" },
41 };
42 
43 const char *scsi_device_state_name(enum scsi_device_state state)
44 {
45  int i;
46  char *name = NULL;
47 
48  for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
49  if (sdev_states[i].value == state) {
50  name = sdev_states[i].name;
51  break;
52  }
53  }
54  return name;
55 }
56 
57 static const struct {
58  enum scsi_host_state value;
59  char *name;
60 } shost_states[] = {
61  { SHOST_CREATED, "created" },
62  { SHOST_RUNNING, "running" },
63  { SHOST_CANCEL, "cancel" },
64  { SHOST_DEL, "deleted" },
65  { SHOST_RECOVERY, "recovery" },
66  { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
67  { SHOST_DEL_RECOVERY, "deleted/recovery", },
68 };
69 const char *scsi_host_state_name(enum scsi_host_state state)
70 {
71  int i;
72  char *name = NULL;
73 
74  for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
75  if (shost_states[i].value == state) {
76  name = shost_states[i].name;
77  break;
78  }
79  }
80  return name;
81 }
82 
83 static int check_set(unsigned int *val, char *src)
84 {
85  char *last;
86 
87  if (strncmp(src, "-", 20) == 0) {
88  *val = SCAN_WILD_CARD;
89  } else {
90  /*
91  * Doesn't check for int overflow
92  */
93  *val = simple_strtoul(src, &last, 0);
94  if (*last != '\0')
95  return 1;
96  }
97  return 0;
98 }
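/*
 * Illustrative sketch (not part of the original file): check_set() maps the
 * literal "-" to SCAN_WILD_CARD and otherwise parses a number, returning
 * non-zero on trailing garbage:
 *
 *	unsigned int channel;
 *	check_set(&channel, "-");	// channel == SCAN_WILD_CARD, returns 0
 *	check_set(&channel, "2");	// channel == 2, returns 0
 *	check_set(&channel, "2x");	// returns 1 (rejected)
 */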
99 
100 static int scsi_scan(struct Scsi_Host *shost, const char *str)
101 {
102  char s1[15], s2[15], s3[15], junk;
103  unsigned int channel, id, lun;
104  int res;
105 
106  res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk);
107  if (res != 3)
108  return -EINVAL;
109  if (check_set(&channel, s1))
110  return -EINVAL;
111  if (check_set(&id, s2))
112  return -EINVAL;
113  if (check_set(&lun, s3))
114  return -EINVAL;
115  if (shost->transportt->user_scan)
116  res = shost->transportt->user_scan(shost, channel, id, lun);
117  else
118  res = scsi_scan_host_selected(shost, channel, id, lun, 1);
119  return res;
120 }
121 
122 /*
123  * shost_show_function: macro to create an attr function that can be used to
124  * show a non-bit field.
125  */
126 #define shost_show_function(name, field, format_string) \
127 static ssize_t \
128 show_##name (struct device *dev, struct device_attribute *attr, \
129  char *buf) \
130 { \
131  struct Scsi_Host *shost = class_to_shost(dev); \
132  return snprintf (buf, 20, format_string, shost->field); \
133 }
134 
135 /*
136  * shost_rd_attr: macro to create a function and attribute variable for a
137  * read only field.
138  */
139 #define shost_rd_attr2(name, field, format_string) \
140  shost_show_function(name, field, format_string) \
141 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
142 
143 #define shost_rd_attr(field, format_string) \
144 shost_rd_attr2(field, field, format_string)
145 
146 /*
147  * Create the actual show/store functions and data structures.
148  */
149 
150 static ssize_t
151 store_scan(struct device *dev, struct device_attribute *attr,
152  const char *buf, size_t count)
153 {
154  struct Scsi_Host *shost = class_to_shost(dev);
155  int res;
156 
157  res = scsi_scan(shost, buf);
158  if (res == 0)
159  res = count;
160  return res;
161 };
162 static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
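/*
 * Usage sketch (paths assume a host numbered 0): the scan attribute takes
 * "channel id lun", each field either a number or "-" as a wildcard:
 *
 *	echo "0 0 0" > /sys/class/scsi_host/host0/scan
 *	echo "- - -" > /sys/class/scsi_host/host0/scan
 */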
163 
164 static ssize_t
165 store_shost_state(struct device *dev, struct device_attribute *attr,
166  const char *buf, size_t count)
167 {
168  int i;
169  struct Scsi_Host *shost = class_to_shost(dev);
170  enum scsi_host_state state = 0;
171 
172  for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
173  const int len = strlen(shost_states[i].name);
174  if (strncmp(shost_states[i].name, buf, len) == 0 &&
175  buf[len] == '\n') {
176  state = shost_states[i].value;
177  break;
178  }
179  }
180  if (!state)
181  return -EINVAL;
182 
183  if (scsi_host_set_state(shost, state))
184  return -EINVAL;
185  return count;
186 }
187 
188 static ssize_t
189 show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
190 {
191  struct Scsi_Host *shost = class_to_shost(dev);
192  const char *name = scsi_host_state_name(shost->shost_state);
193 
194  if (!name)
195  return -EINVAL;
196 
197  return snprintf(buf, 20, "%s\n", name);
198 }
199 
200 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
201 static struct device_attribute dev_attr_hstate =
202  __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
203 
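/*
 * Usage sketch (assuming host0): the host "state" file accepts the names
 * from shost_states[] terminated by a newline, and reads back the current
 * state:
 *
 *	cat /sys/class/scsi_host/host0/state		# e.g. "running"
 *	echo running > /sys/class/scsi_host/host0/state
 */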
204 static ssize_t
205 show_shost_mode(unsigned int mode, char *buf)
206 {
207  ssize_t len = 0;
208 
209  if (mode & MODE_INITIATOR)
210  len = sprintf(buf, "%s", "Initiator");
211 
212  if (mode & MODE_TARGET)
213  len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");
214 
215  len += sprintf(buf + len, "\n");
216 
217  return len;
218 }
219 
220 static ssize_t
221 show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
222  char *buf)
223 {
224  struct Scsi_Host *shost = class_to_shost(dev);
225  unsigned int supported_mode = shost->hostt->supported_mode;
226 
227  if (supported_mode == MODE_UNKNOWN)
228  /* by default this should be initiator */
229  supported_mode = MODE_INITIATOR;
230 
231  return show_shost_mode(supported_mode, buf);
232 }
233 
234 static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
235 
236 static ssize_t
237 show_shost_active_mode(struct device *dev,
238  struct device_attribute *attr, char *buf)
239 {
240  struct Scsi_Host *shost = class_to_shost(dev);
241 
242  if (shost->active_mode == MODE_UNKNOWN)
243  return snprintf(buf, 20, "unknown\n");
244  else
245  return show_shost_mode(shost->active_mode, buf);
246 }
247 
248 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
249 
250 static int check_reset_type(char *str)
251 {
252  if (strncmp(str, "adapter", 10) == 0)
253  return SCSI_ADAPTER_RESET;
254  else if (strncmp(str, "firmware", 10) == 0)
255  return SCSI_FIRMWARE_RESET;
256  else
257  return 0;
258 }
259 
260 static ssize_t
261 store_host_reset(struct device *dev, struct device_attribute *attr,
262  const char *buf, size_t count)
263 {
264  struct Scsi_Host *shost = class_to_shost(dev);
265  struct scsi_host_template *sht = shost->hostt;
266  int ret = -EINVAL;
267  char str[10];
268  int type;
269 
270  sscanf(buf, "%s", str);
271  type = check_reset_type(str);
272 
273  if (!type)
274  goto exit_store_host_reset;
275 
276  if (sht->host_reset)
277  ret = sht->host_reset(shost, type);
278 
279 exit_store_host_reset:
280  if (ret == 0)
281  ret = count;
282  return ret;
283 }
284 
285 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
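/*
 * Usage sketch (assuming host0): host_reset accepts "adapter" or
 * "firmware" and passes SCSI_ADAPTER_RESET or SCSI_FIRMWARE_RESET to the
 * LLD's ->host_reset() hook, if it provides one:
 *
 *	echo adapter > /sys/class/scsi_host/host0/host_reset
 *	echo firmware > /sys/class/scsi_host/host0/host_reset
 */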
286 
287 shost_rd_attr(unique_id, "%u\n");
288 shost_rd_attr(host_busy, "%hu\n");
289 shost_rd_attr(cmd_per_lun, "%hd\n");
290 shost_rd_attr(can_queue, "%hd\n");
291 shost_rd_attr(sg_tablesize, "%hu\n");
292 shost_rd_attr(sg_prot_tablesize, "%hu\n");
293 shost_rd_attr(unchecked_isa_dma, "%d\n");
294 shost_rd_attr(prot_capabilities, "%u\n");
295 shost_rd_attr(prot_guard_type, "%hd\n");
296 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
297 
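/*
 * For reference, a sketch of what shost_rd_attr(unique_id, "%u\n") above
 * expands to (via shost_rd_attr2 and shost_show_function):
 *
 *	static ssize_t
 *	show_unique_id(struct device *dev, struct device_attribute *attr,
 *		       char *buf)
 *	{
 *		struct Scsi_Host *shost = class_to_shost(dev);
 *		return snprintf(buf, 20, "%u\n", shost->unique_id);
 *	}
 *	static DEVICE_ATTR(unique_id, S_IRUGO, show_unique_id, NULL);
 */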
298 static struct attribute *scsi_sysfs_shost_attrs[] = {
299  &dev_attr_unique_id.attr,
300  &dev_attr_host_busy.attr,
301  &dev_attr_cmd_per_lun.attr,
302  &dev_attr_can_queue.attr,
303  &dev_attr_sg_tablesize.attr,
304  &dev_attr_sg_prot_tablesize.attr,
305  &dev_attr_unchecked_isa_dma.attr,
306  &dev_attr_proc_name.attr,
307  &dev_attr_scan.attr,
308  &dev_attr_hstate.attr,
309  &dev_attr_supported_mode.attr,
310  &dev_attr_active_mode.attr,
311  &dev_attr_prot_capabilities.attr,
312  &dev_attr_prot_guard_type.attr,
313  &dev_attr_host_reset.attr,
314  NULL
315 };
316 
317 static struct attribute_group scsi_shost_attr_group = {
318  .attrs = scsi_sysfs_shost_attrs,
319 };
320 
321 const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
322  &scsi_shost_attr_group,
323  NULL
324 };
325 
326 static void scsi_device_cls_release(struct device *class_dev)
327 {
328  struct scsi_device *sdev;
329 
330  sdev = class_to_sdev(class_dev);
331  put_device(&sdev->sdev_gendev);
332 }
333 
334 static void scsi_device_dev_release_usercontext(struct work_struct *work)
335 {
336  struct scsi_device *sdev;
337  struct device *parent;
338  struct scsi_target *starget;
339  struct list_head *this, *tmp;
340  unsigned long flags;
341 
342  sdev = container_of(work, struct scsi_device, ew.work);
343 
344  parent = sdev->sdev_gendev.parent;
345  starget = to_scsi_target(parent);
346 
347  spin_lock_irqsave(sdev->host->host_lock, flags);
348  starget->reap_ref++;
349  list_del(&sdev->siblings);
350  list_del(&sdev->same_target_siblings);
351  list_del(&sdev->starved_entry);
352  spin_unlock_irqrestore(sdev->host->host_lock, flags);
353 
354  cancel_work_sync(&sdev->event_work);
355 
356  list_for_each_safe(this, tmp, &sdev->event_list) {
357  struct scsi_event *evt;
358 
359  evt = list_entry(this, struct scsi_event, node);
360  list_del(&evt->node);
361  kfree(evt);
362  }
363 
364  blk_put_queue(sdev->request_queue);
365  /* NULL queue means the device can't be used */
366  sdev->request_queue = NULL;
367 
368  scsi_target_reap(scsi_target(sdev));
369 
370  kfree(sdev->inquiry);
371  kfree(sdev);
372 
373  if (parent)
374  put_device(parent);
375 }
376 
377 static void scsi_device_dev_release(struct device *dev)
378 {
379  struct scsi_device *sdp = to_scsi_device(dev);
380  execute_in_process_context(scsi_device_dev_release_usercontext,
381  &sdp->ew);
382 }
383 
384 static struct class sdev_class = {
385  .name = "scsi_device",
386  .dev_release = scsi_device_cls_release,
387 };
388 
389 /* all probing is done in the individual ->probe routines */
390 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
391 {
392  struct scsi_device *sdp;
393 
394  if (dev->type != &scsi_dev_type)
395  return 0;
396 
397  sdp = to_scsi_device(dev);
398  if (sdp->no_uld_attach)
399  return 0;
400  return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
401 }
402 
403 static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
404 {
405  struct scsi_device *sdev;
406 
407  if (dev->type != &scsi_dev_type)
408  return 0;
409 
410  sdev = to_scsi_device(dev);
411 
412  add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
413  return 0;
414 }
415 
416 struct bus_type scsi_bus_type = {
417  .name = "scsi",
418  .match = scsi_bus_match,
419  .uevent = scsi_bus_uevent,
420 #ifdef CONFIG_PM
421  .pm = &scsi_bus_pm_ops,
422 #endif
423 };
424 EXPORT_SYMBOL_GPL(scsi_bus_type);
425 
426 int scsi_sysfs_register(void)
427 {
428  int error;
429 
430  error = bus_register(&scsi_bus_type);
431  if (!error) {
432  error = class_register(&sdev_class);
433  if (error)
434  bus_unregister(&scsi_bus_type);
435  }
436 
437  return error;
438 }
439 
440 void scsi_sysfs_unregister(void)
441 {
442  class_unregister(&sdev_class);
443  bus_unregister(&scsi_bus_type);
444 }
445 
446 /*
447  * sdev_show_function: macro to create an attr function that can be used to
448  * show a non-bit field.
449  */
450 #define sdev_show_function(field, format_string) \
451 static ssize_t \
452 sdev_show_##field (struct device *dev, struct device_attribute *attr, \
453  char *buf) \
454 { \
455  struct scsi_device *sdev; \
456  sdev = to_scsi_device(dev); \
457  return snprintf (buf, 20, format_string, sdev->field); \
458 } \
459 
460 /*
461  * sdev_rd_attr: macro to create a function and attribute variable for a
462  * read only field.
463  */
464 #define sdev_rd_attr(field, format_string) \
465  sdev_show_function(field, format_string) \
466 static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
467 
468 
469 /*
470  * sdev_rw_attr: create a function and attribute variable for a
471  * read/write field.
472  */
473 #define sdev_rw_attr(field, format_string) \
474  sdev_show_function(field, format_string) \
475  \
476 static ssize_t \
477 sdev_store_##field (struct device *dev, struct device_attribute *attr, \
478  const char *buf, size_t count) \
479 { \
480  struct scsi_device *sdev; \
481  sdev = to_scsi_device(dev); \
482  sscanf (buf, format_string, &sdev->field); \
483  return count; \
484 } \
485 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
486 
487 /* Currently we don't export bit fields, but we might in future,
488  * so leave this code in */
489 #if 0
490 /*
491  * sdev_rd_attr: create a function and attribute variable for a
492  * read/write bit field.
493  */
494 #define sdev_rw_attr_bit(field) \
495  sdev_show_function(field, "%d\n") \
496  \
497 static ssize_t \
498 sdev_store_##field (struct device *dev, struct device_attribute *attr, \
499  const char *buf, size_t count) \
500 { \
501  int ret; \
502  struct scsi_device *sdev; \
503  ret = scsi_sdev_check_buf_bit(buf); \
504  if (ret >= 0) { \
505  sdev = to_scsi_device(dev); \
506  sdev->field = ret; \
507  ret = count; \
508  } \
509  return ret; \
510 } \
511 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
512 
513 /*
514  * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
515  * else return -EINVAL.
516  */
517 static int scsi_sdev_check_buf_bit(const char *buf)
518 {
519  if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
520  if (buf[0] == '1')
521  return 1;
522  else if (buf[0] == '0')
523  return 0;
524  else
525  return -EINVAL;
526  } else
527  return -EINVAL;
528 }
529 #endif
530 /*
531  * Create the actual show/store functions and data structures.
532  */
533 sdev_rd_attr (device_blocked, "%d\n");
534 sdev_rd_attr (queue_depth, "%d\n");
535 sdev_rd_attr (type, "%d\n");
536 sdev_rd_attr (scsi_level, "%d\n");
537 sdev_rd_attr (vendor, "%.8s\n");
538 sdev_rd_attr (model, "%.16s\n");
539 sdev_rd_attr (rev, "%.4s\n");
540 
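/*
 * For reference, a sketch of what sdev_rd_attr (type, "%d\n") above
 * expands to:
 *
 *	static ssize_t
 *	sdev_show_type(struct device *dev, struct device_attribute *attr,
 *		       char *buf)
 *	{
 *		struct scsi_device *sdev;
 *		sdev = to_scsi_device(dev);
 *		return snprintf(buf, 20, "%d\n", sdev->type);
 *	}
 *	static DEVICE_ATTR(type, S_IRUGO, sdev_show_type, NULL);
 */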
541 /*
542  * TODO: can we make these symlinks to the block layer ones?
543  */
544 static ssize_t
545 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
546 {
547  struct scsi_device *sdev;
548  sdev = to_scsi_device(dev);
549  return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
550 }
551 
552 static ssize_t
553 sdev_store_timeout (struct device *dev, struct device_attribute *attr,
554  const char *buf, size_t count)
555 {
556  struct scsi_device *sdev;
557  int timeout;
558  sdev = to_scsi_device(dev);
559  sscanf (buf, "%d\n", &timeout);
560  blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
561  return count;
562 }
563 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
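/*
 * Usage sketch (device name is an example): the timeout attribute is in
 * seconds; the store converts it to jiffies for the request queue:
 *
 *	cat /sys/bus/scsi/devices/0:0:0:0/timeout
 *	echo 60 > /sys/bus/scsi/devices/0:0:0:0/timeout
 */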
564 
565 static ssize_t
566 store_rescan_field (struct device *dev, struct device_attribute *attr,
567  const char *buf, size_t count)
568 {
569  scsi_rescan_device(dev);
570  return count;
571 }
572 static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
573 
574 static void sdev_store_delete_callback(struct device *dev)
575 {
576  scsi_remove_device(to_scsi_device(dev));
577 }
578 
579 static ssize_t
580 sdev_store_delete(struct device *dev, struct device_attribute *attr,
581  const char *buf, size_t count)
582 {
583  int rc;
584 
585  /* An attribute cannot be unregistered by one of its own methods,
586  * so we have to use this roundabout approach.
587  */
588  rc = device_schedule_callback(dev, sdev_store_delete_callback);
589  if (rc)
590  count = rc;
591  return count;
592 };
593 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
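/*
 * Usage sketch (device name is an example): writing anything to "rescan"
 * revalidates the device, and writing to "delete" removes it through a
 * scheduled callback, since an attribute cannot unregister itself:
 *
 *	echo 1 > /sys/bus/scsi/devices/0:0:0:0/rescan
 *	echo 1 > /sys/bus/scsi/devices/0:0:0:0/delete
 */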
594 
595 static ssize_t
596 store_state_field(struct device *dev, struct device_attribute *attr,
597  const char *buf, size_t count)
598 {
599  int i;
600  struct scsi_device *sdev = to_scsi_device(dev);
601  enum scsi_device_state state = 0;
602 
603  for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
604  const int len = strlen(sdev_states[i].name);
605  if (strncmp(sdev_states[i].name, buf, len) == 0 &&
606  buf[len] == '\n') {
607  state = sdev_states[i].value;
608  break;
609  }
610  }
611  if (!state)
612  return -EINVAL;
613 
614  if (scsi_device_set_state(sdev, state))
615  return -EINVAL;
616  return count;
617 }
618 
619 static ssize_t
620 show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
621 {
622  struct scsi_device *sdev = to_scsi_device(dev);
623  const char *name = scsi_device_state_name(sdev->sdev_state);
624 
625  if (!name)
626  return -EINVAL;
627 
628  return snprintf(buf, 20, "%s\n", name);
629 }
630 
631 static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
632 
633 static ssize_t
634 show_queue_type_field(struct device *dev, struct device_attribute *attr,
635  char *buf)
636 {
637  struct scsi_device *sdev = to_scsi_device(dev);
638  const char *name = "none";
639 
640  if (sdev->ordered_tags)
641  name = "ordered";
642  else if (sdev->simple_tags)
643  name = "simple";
644 
645  return snprintf(buf, 20, "%s\n", name);
646 }
647 
648 static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
649 
650 static ssize_t
651 show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
652 {
653  return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
654 }
655 
656 static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
657 
658 #define show_sdev_iostat(field) \
659 static ssize_t \
660 show_iostat_##field(struct device *dev, struct device_attribute *attr, \
661  char *buf) \
662 { \
663  struct scsi_device *sdev = to_scsi_device(dev); \
664  unsigned long long count = atomic_read(&sdev->field); \
665  return snprintf(buf, 20, "0x%llx\n", count); \
666 } \
667 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
668 
669 show_sdev_iostat(iorequest_cnt);
670 show_sdev_iostat(iodone_cnt);
671 show_sdev_iostat(ioerr_cnt);
672 
673 static ssize_t
674 sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
675 {
676  struct scsi_device *sdev;
677  sdev = to_scsi_device(dev);
678  return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
679 }
680 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
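/*
 * Example: SCSI_DEVICE_MODALIAS_FMT is "scsi:t-0x%02x" (scsi_device.h), so
 * a direct-access device (type 0x00) reports "scsi:t-0x00", which udev can
 * match against upper-level driver module aliases such as sd_mod's.
 */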
681 
682 #define DECLARE_EVT_SHOW(name, Cap_name) \
683 static ssize_t \
684 sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \
685  char *buf) \
686 { \
687  struct scsi_device *sdev = to_scsi_device(dev); \
688  int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
689  return snprintf(buf, 20, "%d\n", val); \
690 }
691 
692 #define DECLARE_EVT_STORE(name, Cap_name) \
693 static ssize_t \
694 sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
695  const char *buf, size_t count) \
696 { \
697  struct scsi_device *sdev = to_scsi_device(dev); \
698  int val = simple_strtoul(buf, NULL, 0); \
699  if (val == 0) \
700  clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
701  else if (val == 1) \
702  set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
703  else \
704  return -EINVAL; \
705  return count; \
706 }
707 
708 #define DECLARE_EVT(name, Cap_name) \
709  DECLARE_EVT_SHOW(name, Cap_name) \
710  DECLARE_EVT_STORE(name, Cap_name) \
711  static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \
712  sdev_store_evt_##name);
713 #define REF_EVT(name) &dev_attr_evt_##name.attr
714 
715 DECLARE_EVT(media_change, MEDIA_CHANGE)
716 
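/*
 * Sketch of the expansion: DECLARE_EVT(media_change, MEDIA_CHANGE) above
 * generates sdev_show_evt_media_change() and sdev_store_evt_media_change()
 * operating on the SDEV_EVT_MEDIA_CHANGE bit of sdev->supported_events,
 * plus dev_attr_evt_media_change, so the flag appears in sysfs as
 * "evt_media_change" holding 0 or 1.
 */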
717 /* Default template for device attributes. May NOT be modified */
718 static struct attribute *scsi_sdev_attrs[] = {
719  &dev_attr_device_blocked.attr,
720  &dev_attr_type.attr,
721  &dev_attr_scsi_level.attr,
722  &dev_attr_vendor.attr,
723  &dev_attr_model.attr,
724  &dev_attr_rev.attr,
725  &dev_attr_rescan.attr,
726  &dev_attr_delete.attr,
727  &dev_attr_state.attr,
728  &dev_attr_timeout.attr,
729  &dev_attr_iocounterbits.attr,
730  &dev_attr_iorequest_cnt.attr,
731  &dev_attr_iodone_cnt.attr,
732  &dev_attr_ioerr_cnt.attr,
733  &dev_attr_modalias.attr,
734  REF_EVT(media_change),
735  NULL
736 };
737 
738 static struct attribute_group scsi_sdev_attr_group = {
739  .attrs = scsi_sdev_attrs,
740 };
741 
742 static const struct attribute_group *scsi_sdev_attr_groups[] = {
743  &scsi_sdev_attr_group,
744  NULL
745 };
746 
747 static ssize_t
748 sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
749  const char *buf, size_t count)
750 {
751  int depth, retval;
752  struct scsi_device *sdev = to_scsi_device(dev);
753  struct scsi_host_template *sht = sdev->host->hostt;
754 
755  if (!sht->change_queue_depth)
756  return -EINVAL;
757 
758  depth = simple_strtoul(buf, NULL, 0);
759 
760  if (depth < 1)
761  return -EINVAL;
762 
763  retval = sht->change_queue_depth(sdev, depth,
764  SCSI_QDEPTH_DEFAULT);
765  if (retval < 0)
766  return retval;
767 
768  sdev->max_queue_depth = sdev->queue_depth;
769 
770  return count;
771 }
772 
773 static struct device_attribute sdev_attr_queue_depth_rw =
774  __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
775  sdev_store_queue_depth_rw);
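/*
 * Usage sketch (device name is an example): when the host template
 * provides ->change_queue_depth, queue_depth is created writable:
 *
 *	echo 32 > /sys/bus/scsi/devices/0:0:0:0/queue_depth
 */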
776 
777 static ssize_t
778 sdev_show_queue_ramp_up_period(struct device *dev,
779  struct device_attribute *attr,
780  char *buf)
781 {
782  struct scsi_device *sdev;
783  sdev = to_scsi_device(dev);
784  return snprintf(buf, 20, "%u\n",
785  jiffies_to_msecs(sdev->queue_ramp_up_period));
786 }
787 
788 static ssize_t
789 sdev_store_queue_ramp_up_period(struct device *dev,
790  struct device_attribute *attr,
791  const char *buf, size_t count)
792 {
793  struct scsi_device *sdev = to_scsi_device(dev);
794  unsigned long period;
795 
796  if (strict_strtoul(buf, 10, &period))
797  return -EINVAL;
798 
799  sdev->queue_ramp_up_period = msecs_to_jiffies(period);
800  return period;
801 }
802 
803 static struct device_attribute sdev_attr_queue_ramp_up_period =
804  __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
805  sdev_show_queue_ramp_up_period,
806  sdev_store_queue_ramp_up_period);
807 
808 static ssize_t
809 sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
810  const char *buf, size_t count)
811 {
812  struct scsi_device *sdev = to_scsi_device(dev);
813  struct scsi_host_template *sht = sdev->host->hostt;
814  int tag_type = 0, retval;
815  int prev_tag_type = scsi_get_tag_type(sdev);
816 
817  if (!sdev->tagged_supported || !sht->change_queue_type)
818  return -EINVAL;
819 
820  if (strncmp(buf, "ordered", 7) == 0)
821  tag_type = MSG_ORDERED_TAG;
822  else if (strncmp(buf, "simple", 6) == 0)
823  tag_type = MSG_SIMPLE_TAG;
824  else if (strncmp(buf, "none", 4) != 0)
825  return -EINVAL;
826 
827  if (tag_type == prev_tag_type)
828  return count;
829 
830  retval = sht->change_queue_type(sdev, tag_type);
831  if (retval < 0)
832  return retval;
833 
834  return count;
835 }
836 
837 static int scsi_target_add(struct scsi_target *starget)
838 {
839  int error;
840 
841  if (starget->state != STARGET_CREATED)
842  return 0;
843 
844  error = device_add(&starget->dev);
845  if (error) {
846  dev_err(&starget->dev, "target device_add failed, error %d\n", error);
847  return error;
848  }
849  transport_add_device(&starget->dev);
850  starget->state = STARGET_RUNNING;
851 
852  pm_runtime_set_active(&starget->dev);
853  pm_runtime_enable(&starget->dev);
854  device_enable_async_suspend(&starget->dev);
855 
856  return 0;
857 }
858 
859 static struct device_attribute sdev_attr_queue_type_rw =
860  __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
861  sdev_store_queue_type_rw);
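/*
 * Usage sketch (device name is an example): with ->change_queue_type
 * provided, queue_type accepts "none", "simple" or "ordered":
 *
 *	echo simple > /sys/bus/scsi/devices/0:0:0:0/queue_type
 */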
862 
863 /**
864  * scsi_sysfs_add_sdev - add scsi device to sysfs
865  * @sdev:	scsi_device to add
866  *
867  * Return value:
868  * 	0 on Success / non-zero on Failure
869  **/
870 int scsi_sysfs_add_sdev(struct scsi_device *sdev)
871 {
872  int error, i;
873  struct request_queue *rq = sdev->request_queue;
874  struct scsi_target *starget = sdev->sdev_target;
875 
876  error = scsi_device_set_state(sdev, SDEV_RUNNING);
877  if (error)
878  return error;
879 
880  error = scsi_target_add(starget);
881  if (error)
882  return error;
883 
884  transport_configure_device(&starget->dev);
885 
886  device_enable_async_suspend(&sdev->sdev_gendev);
887  scsi_autopm_get_target(starget);
888  pm_runtime_set_active(&sdev->sdev_gendev);
889  pm_runtime_forbid(&sdev->sdev_gendev);
890  pm_runtime_enable(&sdev->sdev_gendev);
891  scsi_autopm_put_target(starget);
892 
893  /* The following call will keep sdev active indefinitely, until
894  * its driver does a corresponding scsi_autopm_put_device(). Only
895  * drivers supporting autosuspend will do this.
896  */
897  scsi_autopm_get_device(sdev);
898 
899  error = device_add(&sdev->sdev_gendev);
900  if (error) {
901  sdev_printk(KERN_INFO, sdev,
902  "failed to add device: %d\n", error);
903  return error;
904  }
905  device_enable_async_suspend(&sdev->sdev_dev);
906  error = device_add(&sdev->sdev_dev);
907  if (error) {
908  sdev_printk(KERN_INFO, sdev,
909  "failed to add class device: %d\n", error);
910  device_del(&sdev->sdev_gendev);
911  return error;
912  }
913  transport_add_device(&sdev->sdev_gendev);
914  sdev->is_visible = 1;
915 
916  /* create queue files, which may be writable, depending on the host */
917  if (sdev->host->hostt->change_queue_depth) {
918  error = device_create_file(&sdev->sdev_gendev,
919  &sdev_attr_queue_depth_rw);
920  error = device_create_file(&sdev->sdev_gendev,
921  &sdev_attr_queue_ramp_up_period);
922  }
923  else
924  error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
925  if (error)
926  return error;
927 
928  if (sdev->host->hostt->change_queue_type)
929  error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
930  else
931  error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
932  if (error)
933  return error;
934 
935  error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
936 
937  if (error)
938  /* we're treating error on bsg register as non-fatal,
939  * so pretend nothing went wrong */
940  sdev_printk(KERN_INFO, sdev,
941  "Failed to register bsg queue, errno=%d\n", error);
942 
943  /* add additional host specific attributes */
944  if (sdev->host->hostt->sdev_attrs) {
945  for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
946  error = device_create_file(&sdev->sdev_gendev,
947  sdev->host->hostt->sdev_attrs[i]);
948  if (error)
949  return error;
950  }
951  }
952 
953  return error;
954 }
955 
956 void __scsi_remove_device(struct scsi_device *sdev)
957 {
958  struct device *dev = &sdev->sdev_gendev;
959 
960  if (sdev->is_visible) {
961  if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
962  return;
963 
964  bsg_unregister_queue(sdev->request_queue);
965  device_unregister(&sdev->sdev_dev);
966  transport_remove_device(dev);
967  device_del(dev);
968  } else
969  put_device(&sdev->sdev_dev);
970 
971  /*
972  * Stop accepting new requests and wait until all queuecommand() and
973  * scsi_run_queue() invocations have finished before tearing down the
974  * device.
975  */
976  scsi_device_set_state(sdev, SDEV_DEL);
977  blk_cleanup_queue(sdev->request_queue);
978  cancel_work_sync(&sdev->requeue_work);
979 
980  if (sdev->host->hostt->slave_destroy)
981  sdev->host->hostt->slave_destroy(sdev);
982  transport_destroy_device(dev);
983 
984  put_device(dev);
985 }
986 
987 /**
988  * scsi_remove_device - unregister a device from the scsi bus
989  * @sdev:	scsi_device to unregister
990  **/
991 void scsi_remove_device(struct scsi_device *sdev)
992 {
993  struct Scsi_Host *shost = sdev->host;
994 
995  mutex_lock(&shost->scan_mutex);
996  __scsi_remove_device(sdev);
997  mutex_unlock(&shost->scan_mutex);
998 }
999 EXPORT_SYMBOL(scsi_remove_device);
1000 
1001 static void __scsi_remove_target(struct scsi_target *starget)
1002 {
1003  struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1004  unsigned long flags;
1005  struct scsi_device *sdev;
1006 
1007  spin_lock_irqsave(shost->host_lock, flags);
1008  restart:
1009  list_for_each_entry(sdev, &shost->__devices, siblings) {
1010  if (sdev->channel != starget->channel ||
1011  sdev->id != starget->id ||
1012  scsi_device_get(sdev))
1013  continue;
1014  spin_unlock_irqrestore(shost->host_lock, flags);
1015  scsi_remove_device(sdev);
1016  scsi_device_put(sdev);
1017  spin_lock_irqsave(shost->host_lock, flags);
1018  goto restart;
1019  }
1020  spin_unlock_irqrestore(shost->host_lock, flags);
1021 }
1022 
1031 void scsi_remove_target(struct device *dev)
1032 {
1033  struct Scsi_Host *shost = dev_to_shost(dev->parent);
1034  struct scsi_target *starget, *last = NULL;
1035  unsigned long flags;
1036 
1037  /* remove targets being careful to lookup next entry before
1038  * deleting the last
1039  */
1040  spin_lock_irqsave(shost->host_lock, flags);
1041  list_for_each_entry(starget, &shost->__targets, siblings) {
1042  if (starget->state == STARGET_DEL)
1043  continue;
1044  if (starget->dev.parent == dev || &starget->dev == dev) {
1045  /* assuming new targets arrive at the end */
1046  starget->reap_ref++;
1047  spin_unlock_irqrestore(shost->host_lock, flags);
1048  if (last)
1049  scsi_target_reap(last);
1050  last = starget;
1051  __scsi_remove_target(starget);
1052  spin_lock_irqsave(shost->host_lock, flags);
1053  }
1054  }
1055  spin_unlock_irqrestore(shost->host_lock, flags);
1056 
1057  if (last)
1058  scsi_target_reap(last);
1059 }
1060 EXPORT_SYMBOL(scsi_remove_target);
1061 
1062 int scsi_register_driver(struct device_driver *drv)
1063 {
1064  drv->bus = &scsi_bus_type;
1065 
1066  return driver_register(drv);
1067 }
1068 EXPORT_SYMBOL(scsi_register_driver);
1069 
1070 int scsi_register_interface(struct class_interface *intf)
1071 {
1072  intf->class = &sdev_class;
1073 
1074  return class_interface_register(intf);
1075 }
1076 EXPORT_SYMBOL(scsi_register_interface);
1077 
1078 /**
1079  * scsi_sysfs_add_host - add scsi host to subsystem
1080  * @shost:     scsi host struct to add to subsystem
1081  **/
1082 int scsi_sysfs_add_host(struct Scsi_Host *shost)
1083 {
1084  int error, i;
1085 
1086  /* add host specific attributes */
1087  if (shost->hostt->shost_attrs) {
1088  for (i = 0; shost->hostt->shost_attrs[i]; i++) {
1089  error = device_create_file(&shost->shost_dev,
1090  shost->hostt->shost_attrs[i]);
1091  if (error)
1092  return error;
1093  }
1094  }
1095 
1096  transport_register_device(&shost->shost_gendev);
1097  transport_configure_device(&shost->shost_gendev);
1098  return 0;
1099 }
1100 
1101 static struct device_type scsi_dev_type = {
1102  .name = "scsi_device",
1103  .release = scsi_device_dev_release,
1104  .groups = scsi_sdev_attr_groups,
1105 };
1106 
1107 void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1108 {
1109  unsigned long flags;
1110  struct Scsi_Host *shost = sdev->host;
1111  struct scsi_target *starget = sdev->sdev_target;
1112 
1113  device_initialize(&sdev->sdev_gendev);
1114  sdev->sdev_gendev.bus = &scsi_bus_type;
1115  sdev->sdev_gendev.type = &scsi_dev_type;
1116  dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
1117  sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1118 
1119  device_initialize(&sdev->sdev_dev);
1120  sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
1121  sdev->sdev_dev.class = &sdev_class;
1122  dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
1123  sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1124  sdev->scsi_level = starget->scsi_level;
1125  transport_setup_device(&sdev->sdev_gendev);
1126  spin_lock_irqsave(shost->host_lock, flags);
1127  list_add_tail(&sdev->same_target_siblings, &starget->devices);
1128  list_add_tail(&sdev->siblings, &shost->__devices);
1129  spin_unlock_irqrestore(shost->host_lock, flags);
1130 }
1131 
1132 int scsi_is_sdev_device(const struct device *dev)
1133 {
1134  return dev->type == &scsi_dev_type;
1135 }
1136 EXPORT_SYMBOL(scsi_is_sdev_device);
1137 
1138 /* A blank transport template that is used in drivers that don't
1139  * yet implement Transport Attributes */
1140 struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };