scsi_transport_fc.c
1 /*
2  * FiberChannel transport specific attributes exported to sysfs.
3  *
4  * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * ========
21  *
22  * Copyright (C) 2004-2007 James Smart, Emulex Corporation
23  * Rewrite for host, target, device, and remote port attributes,
24  * statistics, and service functions...
25  * Add vports, etc
26  *
27  */
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/kernel.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <linux/netlink.h>
39 #include <net/netlink.h>
40 #include <scsi/scsi_netlink_fc.h>
41 #include <scsi/scsi_bsg_fc.h>
42 #include "scsi_priv.h"
44 
45 static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
46 static void fc_vport_sched_delete(struct work_struct *work);
47 static int fc_vport_setup(struct Scsi_Host *shost, int channel,
48  struct device *pdev, struct fc_vport_identifiers *ids,
49  struct fc_vport **vport);
50 static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
51 static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
52 static void fc_bsg_remove(struct request_queue *);
53 static void fc_bsg_goose_queue(struct fc_rport *);
54 
55 /*
56  * Module Parameters
57  */
58 
59 /*
60  * dev_loss_tmo: the default number of seconds that the FC transport
61  * should insulate the loss of a remote port.
62  * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
63  */
64 static unsigned int fc_dev_loss_tmo = 60; /* seconds */
65 
66 module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
67 MODULE_PARM_DESC(dev_loss_tmo,
68  "Maximum number of seconds that the FC transport should"
69  " insulate the loss of a remote port. Once this value is"
70  " exceeded, the scsi target is removed. Value should be"
71  " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
72  " fast_io_fail_tmo is not set.");
73 
74 /*
75  * Redefine so that we can have same named attributes in the
76  * sdev/starget/host objects.
77  */
78 #define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
79 struct device_attribute device_attr_##_prefix##_##_name = \
80  __ATTR(_name,_mode,_show,_store)
81 
82 #define fc_enum_name_search(title, table_type, table) \
83 static const char *get_fc_##title##_name(enum table_type table_key) \
84 { \
85  int i; \
86  char *name = NULL; \
87  \
88  for (i = 0; i < ARRAY_SIZE(table); i++) { \
89  if (table[i].value == table_key) { \
90  name = table[i].name; \
91  break; \
92  } \
93  } \
94  return name; \
95 }
96 
97 #define fc_enum_name_match(title, table_type, table) \
98 static int get_fc_##title##_match(const char *table_key, \
99  enum table_type *value) \
100 { \
101  int i; \
102  \
103  for (i = 0; i < ARRAY_SIZE(table); i++) { \
104  if (strncmp(table_key, table[i].name, \
105  table[i].matchlen) == 0) { \
106  *value = table[i].value; \
107  return 0; /* success */ \
108  } \
109  } \
110  return 1; /* failure */ \
111 }
112 
113 
114 /* Convert fc_port_type values to ascii string name */
115 static struct {
116  enum fc_port_type value;
117  char *name;
118 } fc_port_type_names[] = {
119  { FC_PORTTYPE_UNKNOWN, "Unknown" },
120  { FC_PORTTYPE_OTHER, "Other" },
121  { FC_PORTTYPE_NOTPRESENT, "Not Present" },
122  { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" },
123  { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
124  { FC_PORTTYPE_LPORT, "LPort (private loop)" },
125  { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" },
126  { FC_PORTTYPE_NPIV, "NPIV VPORT" },
127 };
128 fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
129 #define FC_PORTTYPE_MAX_NAMELEN 50
130 
131 /* Reuse fc_port_type enum function for vport_type */
132 #define get_fc_vport_type_name get_fc_port_type_name
133 
134 
135 /* Convert fc_host_event_code values to ascii string name */
136 static const struct {
137  enum fc_host_event_code value;
138  char *name;
139 } fc_host_event_code_names[] = {
140  { FCH_EVT_LIP, "lip" },
141  { FCH_EVT_LINKUP, "link_up" },
142  { FCH_EVT_LINKDOWN, "link_down" },
143  { FCH_EVT_LIPRESET, "lip_reset" },
144  { FCH_EVT_RSCN, "rscn" },
145  { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
146  { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
147  { FCH_EVT_PORT_ONLINE, "port_online" },
148  { FCH_EVT_PORT_OFFLINE, "port_offline" },
149  { FCH_EVT_PORT_FABRIC, "port_fabric" },
150  { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
151  { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
152 };
153 fc_enum_name_search(host_event_code, fc_host_event_code,
154  fc_host_event_code_names)
155 #define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
156 
157 
158 /* Convert fc_port_state values to ascii string name */
159 static struct {
160  enum fc_port_state value;
161  char *name;
162 } fc_port_state_names[] = {
163  { FC_PORTSTATE_UNKNOWN, "Unknown" },
164  { FC_PORTSTATE_NOTPRESENT, "Not Present" },
165  { FC_PORTSTATE_ONLINE, "Online" },
166  { FC_PORTSTATE_OFFLINE, "Offline" },
167  { FC_PORTSTATE_BLOCKED, "Blocked" },
168  { FC_PORTSTATE_BYPASSED, "Bypassed" },
169  { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" },
170  { FC_PORTSTATE_LINKDOWN, "Linkdown" },
171  { FC_PORTSTATE_ERROR, "Error" },
172  { FC_PORTSTATE_LOOPBACK, "Loopback" },
173  { FC_PORTSTATE_DELETED, "Deleted" },
174 };
175 fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
176 #define FC_PORTSTATE_MAX_NAMELEN 20
177 
178 
179 /* Convert fc_vport_state values to ascii string name */
180 static struct {
181  enum fc_vport_state value;
182  char *name;
183 } fc_vport_state_names[] = {
184  { FC_VPORT_UNKNOWN, "Unknown" },
185  { FC_VPORT_ACTIVE, "Active" },
186  { FC_VPORT_DISABLED, "Disabled" },
187  { FC_VPORT_LINKDOWN, "Linkdown" },
188  { FC_VPORT_INITIALIZING, "Initializing" },
189  { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
190  { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
191  { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
192  { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
193  { FC_VPORT_FAILED, "VPort Failed" },
194 };
195 fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
196 #define FC_VPORTSTATE_MAX_NAMELEN 24
197 
198 /* Reuse fc_vport_state enum function for vport_last_state */
199 #define get_fc_vport_last_state_name get_fc_vport_state_name
200 
201 
202 /* Convert fc_tgtid_binding_type values to ascii string name */
203 static const struct {
204  enum fc_tgtid_binding_type value;
205  char *name;
206  int matchlen;
207 } fc_tgtid_binding_names[] = {
208  { FC_TGTID_BIND_NONE, "none", 4 },
209  { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
210  { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
211  { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
212 };
213 fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
214  fc_tgtid_binding_names)
215 fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
216  fc_tgtid_binding_names)
217 #define FC_BINDTYPE_MAX_NAMELEN 30
218 
219 
220 #define fc_bitfield_name_search(title, table) \
221 static ssize_t \
222 get_fc_##title##_names(u32 table_key, char *buf) \
223 { \
224  char *prefix = ""; \
225  ssize_t len = 0; \
226  int i; \
227  \
228  for (i = 0; i < ARRAY_SIZE(table); i++) { \
229  if (table[i].value & table_key) { \
230  len += sprintf(buf + len, "%s%s", \
231  prefix, table[i].name); \
232  prefix = ", "; \
233  } \
234  } \
235  len += sprintf(buf + len, "\n"); \
236  return len; \
237 }
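/*
 * Editorial note (not part of the original source): for example,
 * get_fc_port_speed_names(FC_PORTSPEED_4GBIT | FC_PORTSPEED_8GBIT, buf)
 * (generated further below) writes "4 Gbit, 8 Gbit\n" into buf and returns
 * its length, since each set bit contributes its table entry in table order.
 */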
238 
239 
240 /* Convert FC_COS bit values to ascii string name */
241 static const struct {
242  u32 value;
243  char *name;
244 } fc_cos_names[] = {
245  { FC_COS_CLASS1, "Class 1" },
246  { FC_COS_CLASS2, "Class 2" },
247  { FC_COS_CLASS3, "Class 3" },
248  { FC_COS_CLASS4, "Class 4" },
249  { FC_COS_CLASS6, "Class 6" },
250 };
251 fc_bitfield_name_search(cos, fc_cos_names)
252 
253 
254 /* Convert FC_PORTSPEED bit values to ascii string name */
255 static const struct {
256  u32 value;
257  char *name;
258 } fc_port_speed_names[] = {
259  { FC_PORTSPEED_1GBIT, "1 Gbit" },
260  { FC_PORTSPEED_2GBIT, "2 Gbit" },
261  { FC_PORTSPEED_4GBIT, "4 Gbit" },
262  { FC_PORTSPEED_10GBIT, "10 Gbit" },
263  { FC_PORTSPEED_8GBIT, "8 Gbit" },
264  { FC_PORTSPEED_16GBIT, "16 Gbit" },
265  { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
266 };
267 fc_bitfield_name_search(port_speed, fc_port_speed_names)
268 
269 
270 static int
271 show_fc_fc4s (char *buf, u8 *fc4_list)
272 {
273  int i, len=0;
274 
275  for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
276  len += sprintf(buf + len , "0x%02x ", *fc4_list);
277  len += sprintf(buf + len, "\n");
278  return len;
279 }
280 
281 
282 /* Convert FC_PORT_ROLE bit values to ascii string name */
283 static const struct {
284  u32 value;
285  char *name;
286 } fc_port_role_names[] = {
287  { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
288  { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
289  { FC_PORT_ROLE_IP_PORT, "IP Port" },
290 };
291 fc_bitfield_name_search(port_roles, fc_port_role_names)
292 
293 /*
294  * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
295  */
296 #define FC_WELLKNOWN_PORTID_MASK 0xfffff0
297 #define FC_WELLKNOWN_ROLE_MASK 0x00000f
298 #define FC_FPORT_PORTID 0x00000e
299 #define FC_FABCTLR_PORTID 0x00000d
300 #define FC_DIRSRVR_PORTID 0x00000c
301 #define FC_TIMESRVR_PORTID 0x00000b
302 #define FC_MGMTSRVR_PORTID 0x00000a
303 
304 
305 static void fc_timeout_deleted_rport(struct work_struct *work);
306 static void fc_timeout_fail_rport_io(struct work_struct *work);
307 static void fc_scsi_scan_rport(struct work_struct *work);
308 
309 /*
310  * Attribute counts per object type...
311  * Increase these values if you add attributes
312  */
313 #define FC_STARGET_NUM_ATTRS 3
314 #define FC_RPORT_NUM_ATTRS 10
315 #define FC_VPORT_NUM_ATTRS 9
316 #define FC_HOST_NUM_ATTRS 29
317 
318 struct fc_internal {
319  struct scsi_transport_template t;
320  struct fc_function_template *f;
321 
322  /*
323  * For attributes : each object has :
324  * An array of the actual attributes structures
325  * An array of null-terminated pointers to the attribute
326  * structures - used for mid-layer interaction.
327  *
328  * The attribute containers for the starget and host are
329  * part of the midlayer. As the remote port is specific to the
330  * fc transport, we must provide the attribute container.
331  */
332  struct device_attribute private_starget_attrs[
333  FC_STARGET_NUM_ATTRS];
334  struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
335 
336  struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
337  struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
338 
339  struct transport_container rport_attr_cont;
340  struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
341  struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
342 
343  struct transport_container vport_attr_cont;
344  struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
345  struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
346 };
347 
348 #define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
349 
350 static int fc_target_setup(struct transport_container *tc, struct device *dev,
351  struct device *cdev)
352 {
353  struct scsi_target *starget = to_scsi_target(dev);
354  struct fc_rport *rport = starget_to_rport(starget);
355 
356  /*
357  * if parent is remote port, use values from remote port.
358  * Otherwise, this host uses the fc_transport, but not the
359  * remote port interface. As such, initialize to known non-values.
360  */
361  if (rport) {
362  fc_starget_node_name(starget) = rport->node_name;
363  fc_starget_port_name(starget) = rport->port_name;
364  fc_starget_port_id(starget) = rport->port_id;
365  } else {
366  fc_starget_node_name(starget) = -1;
367  fc_starget_port_name(starget) = -1;
368  fc_starget_port_id(starget) = -1;
369  }
370 
371  return 0;
372 }
373 
374 static DECLARE_TRANSPORT_CLASS(fc_transport_class,
375  "fc_transport",
376  fc_target_setup,
377  NULL,
378  NULL);
379 
380 static int fc_host_setup(struct transport_container *tc, struct device *dev,
381  struct device *cdev)
382 {
383  struct Scsi_Host *shost = dev_to_shost(dev);
384  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
385 
386  /*
387  * Set default values easily detected by the midlayer as
388  * failure cases. The scsi lldd is responsible for initializing
389  * all transport attributes to valid values per host.
390  */
391  fc_host->node_name = -1;
392  fc_host->port_name = -1;
393  fc_host->permanent_port_name = -1;
394  fc_host->supported_classes = FC_COS_UNSPECIFIED;
395  memset(fc_host->supported_fc4s, 0,
396  sizeof(fc_host->supported_fc4s));
397  fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
398  fc_host->maxframe_size = -1;
399  fc_host->max_npiv_vports = 0;
400  memset(fc_host->serial_number, 0,
401  sizeof(fc_host->serial_number));
402  memset(fc_host->manufacturer, 0,
403  sizeof(fc_host->manufacturer));
404  memset(fc_host->model, 0,
405  sizeof(fc_host->model));
406  memset(fc_host->model_description, 0,
407  sizeof(fc_host->model_description));
408  memset(fc_host->hardware_version, 0,
409  sizeof(fc_host->hardware_version));
410  memset(fc_host->driver_version, 0,
411  sizeof(fc_host->driver_version));
412  memset(fc_host->firmware_version, 0,
413  sizeof(fc_host->firmware_version));
414  memset(fc_host->optionrom_version, 0,
415  sizeof(fc_host->optionrom_version));
416 
417  fc_host->port_id = -1;
418  fc_host->port_type = FC_PORTTYPE_UNKNOWN;
419  fc_host->port_state = FC_PORTSTATE_UNKNOWN;
420  memset(fc_host->active_fc4s, 0,
421  sizeof(fc_host->active_fc4s));
422  fc_host->speed = FC_PORTSPEED_UNKNOWN;
423  fc_host->fabric_name = -1;
424  memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
425  memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
426 
427  fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
428 
429  INIT_LIST_HEAD(&fc_host->rports);
430  INIT_LIST_HEAD(&fc_host->rport_bindings);
431  INIT_LIST_HEAD(&fc_host->vports);
432  fc_host->next_rport_number = 0;
433  fc_host->next_target_id = 0;
434  fc_host->next_vport_number = 0;
435  fc_host->npiv_vports_inuse = 0;
436 
437  snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
438  "fc_wq_%d", shost->host_no);
439  fc_host->work_q = alloc_workqueue(fc_host->work_q_name, 0, 0);
440  if (!fc_host->work_q)
441  return -ENOMEM;
442 
443  fc_host->dev_loss_tmo = fc_dev_loss_tmo;
444  snprintf(fc_host->devloss_work_q_name,
445  sizeof(fc_host->devloss_work_q_name),
446  "fc_dl_%d", shost->host_no);
447  fc_host->devloss_work_q =
448  alloc_workqueue(fc_host->devloss_work_q_name, 0, 0);
449  if (!fc_host->devloss_work_q) {
450  destroy_workqueue(fc_host->work_q);
451  fc_host->work_q = NULL;
452  return -ENOMEM;
453  }
454 
455  fc_bsg_hostadd(shost, fc_host);
456  /* ignore any bsg add error - we just can't do sgio */
457 
458  return 0;
459 }
460 
461 static int fc_host_remove(struct transport_container *tc, struct device *dev,
462  struct device *cdev)
463 {
464  struct Scsi_Host *shost = dev_to_shost(dev);
465  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
466 
467  fc_bsg_remove(fc_host->rqst_q);
468  return 0;
469 }
470 
471 static DECLARE_TRANSPORT_CLASS(fc_host_class,
472  "fc_host",
473  fc_host_setup,
474  fc_host_remove,
475  NULL);
476 
477 /*
478  * Setup and Remove actions for remote ports are handled
479  * in the service functions below.
480  */
481 static DECLARE_TRANSPORT_CLASS(fc_rport_class,
482  "fc_remote_ports",
483  NULL,
484  NULL,
485  NULL);
486 
487 /*
488  * Setup and Remove actions for virtual ports are handled
489  * in the service functions below.
490  */
491 static DECLARE_TRANSPORT_CLASS(fc_vport_class,
492  "fc_vports",
493  NULL,
494  NULL,
495  NULL);
496 
497 /*
498  * Netlink Infrastructure
499  */
500 
501 static atomic_t fc_event_seq;
502 
511 u32
512 fc_get_event_number(void)
513 {
514  return atomic_add_return(1, &fc_event_seq);
515 }
516 EXPORT_SYMBOL(fc_get_event_number);
517 
518 
529 void
530 fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
531  enum fc_host_event_code event_code, u32 event_data)
532 {
533  struct sk_buff *skb;
534  struct nlmsghdr *nlh;
535  struct fc_nl_event *event;
536  const char *name;
537  u32 len, skblen;
538  int err;
539 
540  if (!scsi_nl_sock) {
541  err = -ENOENT;
542  goto send_fail;
543  }
544 
545  len = FC_NL_MSGALIGN(sizeof(*event));
546  skblen = NLMSG_SPACE(len);
547 
548  skb = alloc_skb(skblen, GFP_KERNEL);
549  if (!skb) {
550  err = -ENOBUFS;
551  goto send_fail;
552  }
553 
554  nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
555  skblen - sizeof(*nlh), 0);
556  if (!nlh) {
557  err = -ENOBUFS;
558  goto send_fail_skb;
559  }
560  event = NLMSG_DATA(nlh);
561 
562  INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
563  FC_NL_ASYNC_EVENT, len);
564  event->seconds = get_seconds();
565  event->vendor_id = 0;
566  event->host_no = shost->host_no;
567  event->event_datalen = sizeof(u32); /* bytes */
568  event->event_num = event_number;
569  event->event_code = event_code;
570  event->event_data = event_data;
571 
572  nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
573  GFP_KERNEL);
574  return;
575 
576 send_fail_skb:
577  kfree_skb(skb);
578 send_fail:
579  name = get_fc_host_event_code_name(event_code);
581  "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
582  __func__, shost->host_no,
583  (name) ? name : "<unknown>", event_data, err);
584  return;
585 }
586 EXPORT_SYMBOL(fc_host_post_event);
587 
588 
600 void
601 fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
602  u32 data_len, char * data_buf, u64 vendor_id)
603 {
604  struct sk_buff *skb;
605  struct nlmsghdr *nlh;
606  struct fc_nl_event *event;
607  u32 len, skblen;
608  int err;
609 
610  if (!scsi_nl_sock) {
611  err = -ENOENT;
612  goto send_vendor_fail;
613  }
614 
615  len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
616  skblen = NLMSG_SPACE(len);
617 
618  skb = alloc_skb(skblen, GFP_KERNEL);
619  if (!skb) {
620  err = -ENOBUFS;
621  goto send_vendor_fail;
622  }
623 
624  nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
625  skblen - sizeof(*nlh), 0);
626  if (!nlh) {
627  err = -ENOBUFS;
628  goto send_vendor_fail_skb;
629  }
630  event = NLMSG_DATA(nlh);
631 
632  INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
633  FC_NL_ASYNC_EVENT, len);
634  event->seconds = get_seconds();
635  event->vendor_id = vendor_id;
636  event->host_no = shost->host_no;
637  event->event_datalen = data_len; /* bytes */
638  event->event_num = event_number;
639  event->event_code = FCH_EVT_VENDOR_UNIQUE;
640  memcpy(&event->event_data, data_buf, data_len);
641 
642  nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
643  GFP_KERNEL);
644  return;
645 
646 send_vendor_fail_skb:
647  kfree_skb(skb);
648 send_vendor_fail:
650  "%s: Dropped Event : host %d vendor_unique - err %d\n",
651  __func__, shost->host_no, err);
652  return;
653 }
654 EXPORT_SYMBOL(fc_host_post_vendor_event);
655 
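/*
 * Editorial note (not part of the original source): a minimal sketch of how
 * an LLDD might use the two exported helpers above to report a LIP, assuming
 * it holds a valid shost pointer:
 *
 *   u32 ev = fc_get_event_number();
 *   fc_host_post_event(shost, ev, FCH_EVT_LIP, 0);
 *
 * Listeners subscribed to the SCSI_NL_GRP_FC_EVENTS netlink group then
 * receive the event.
 */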
656 
657 
658 static __init int fc_transport_init(void)
659 {
660  int error;
661 
662  atomic_set(&fc_event_seq, 0);
663 
664  error = transport_class_register(&fc_host_class);
665  if (error)
666  return error;
667  error = transport_class_register(&fc_vport_class);
668  if (error)
669  goto unreg_host_class;
670  error = transport_class_register(&fc_rport_class);
671  if (error)
672  goto unreg_vport_class;
673  error = transport_class_register(&fc_transport_class);
674  if (error)
675  goto unreg_rport_class;
676  return 0;
677 
678 unreg_rport_class:
679  transport_class_unregister(&fc_rport_class);
680 unreg_vport_class:
681  transport_class_unregister(&fc_vport_class);
682 unreg_host_class:
683  transport_class_unregister(&fc_host_class);
684  return error;
685 }
686 
687 static void __exit fc_transport_exit(void)
688 {
689  transport_class_unregister(&fc_transport_class);
690  transport_class_unregister(&fc_rport_class);
691  transport_class_unregister(&fc_host_class);
692  transport_class_unregister(&fc_vport_class);
693 }
694 
695 /*
696  * FC Remote Port Attribute Management
697  */
698 
699 #define fc_rport_show_function(field, format_string, sz, cast) \
700 static ssize_t \
701 show_fc_rport_##field (struct device *dev, \
702  struct device_attribute *attr, char *buf) \
703 { \
704  struct fc_rport *rport = transport_class_to_rport(dev); \
705  struct Scsi_Host *shost = rport_to_shost(rport); \
706  struct fc_internal *i = to_fc_internal(shost->transportt); \
707  if ((i->f->get_rport_##field) && \
708  !((rport->port_state == FC_PORTSTATE_BLOCKED) || \
709  (rport->port_state == FC_PORTSTATE_DELETED) || \
710  (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \
711  i->f->get_rport_##field(rport); \
712  return snprintf(buf, sz, format_string, cast rport->field); \
713 }
714 
715 #define fc_rport_store_function(field) \
716 static ssize_t \
717 store_fc_rport_##field(struct device *dev, \
718  struct device_attribute *attr, \
719  const char *buf, size_t count) \
720 { \
721  int val; \
722  struct fc_rport *rport = transport_class_to_rport(dev); \
723  struct Scsi_Host *shost = rport_to_shost(rport); \
724  struct fc_internal *i = to_fc_internal(shost->transportt); \
725  char *cp; \
726  if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
727  (rport->port_state == FC_PORTSTATE_DELETED) || \
728  (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
729  return -EBUSY; \
730  val = simple_strtoul(buf, &cp, 0); \
731  if (*cp && (*cp != '\n')) \
732  return -EINVAL; \
733  i->f->set_rport_##field(rport, val); \
734  return count; \
735 }
736 
737 #define fc_rport_rd_attr(field, format_string, sz) \
738  fc_rport_show_function(field, format_string, sz, ) \
739 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
740  show_fc_rport_##field, NULL)
741 
742 #define fc_rport_rd_attr_cast(field, format_string, sz, cast) \
743  fc_rport_show_function(field, format_string, sz, (cast)) \
744 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
745  show_fc_rport_##field, NULL)
746 
747 #define fc_rport_rw_attr(field, format_string, sz) \
748  fc_rport_show_function(field, format_string, sz, ) \
749  fc_rport_store_function(field) \
750 static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \
751  show_fc_rport_##field, \
752  store_fc_rport_##field)
753 
754 
755 #define fc_private_rport_show_function(field, format_string, sz, cast) \
756 static ssize_t \
757 show_fc_rport_##field (struct device *dev, \
758  struct device_attribute *attr, char *buf) \
759 { \
760  struct fc_rport *rport = transport_class_to_rport(dev); \
761  return snprintf(buf, sz, format_string, cast rport->field); \
762 }
763 
764 #define fc_private_rport_rd_attr(field, format_string, sz) \
765  fc_private_rport_show_function(field, format_string, sz, ) \
766 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
767  show_fc_rport_##field, NULL)
768 
769 #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \
770  fc_private_rport_show_function(field, format_string, sz, (cast)) \
771 static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
772  show_fc_rport_##field, NULL)
773 
774 
775 #define fc_private_rport_rd_enum_attr(title, maxlen) \
776 static ssize_t \
777 show_fc_rport_##title (struct device *dev, \
778  struct device_attribute *attr, char *buf) \
779 { \
780  struct fc_rport *rport = transport_class_to_rport(dev); \
781  const char *name; \
782  name = get_fc_##title##_name(rport->title); \
783  if (!name) \
784  return -EINVAL; \
785  return snprintf(buf, maxlen, "%s\n", name); \
786 } \
787 static FC_DEVICE_ATTR(rport, title, S_IRUGO, \
788  show_fc_rport_##title, NULL)
789 
790 
791 #define SETUP_RPORT_ATTRIBUTE_RD(field) \
792  i->private_rport_attrs[count] = device_attr_rport_##field; \
793  i->private_rport_attrs[count].attr.mode = S_IRUGO; \
794  i->private_rport_attrs[count].store = NULL; \
795  i->rport_attrs[count] = &i->private_rport_attrs[count]; \
796  if (i->f->show_rport_##field) \
797  count++
798 
799 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \
800  i->private_rport_attrs[count] = device_attr_rport_##field; \
801  i->private_rport_attrs[count].attr.mode = S_IRUGO; \
802  i->private_rport_attrs[count].store = NULL; \
803  i->rport_attrs[count] = &i->private_rport_attrs[count]; \
804  count++
805 
806 #define SETUP_RPORT_ATTRIBUTE_RW(field) \
807  i->private_rport_attrs[count] = device_attr_rport_##field; \
808  if (!i->f->set_rport_##field) { \
809  i->private_rport_attrs[count].attr.mode = S_IRUGO; \
810  i->private_rport_attrs[count].store = NULL; \
811  } \
812  i->rport_attrs[count] = &i->private_rport_attrs[count]; \
813  if (i->f->show_rport_##field) \
814  count++
815 
816 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
817 { \
818  i->private_rport_attrs[count] = device_attr_rport_##field; \
819  i->rport_attrs[count] = &i->private_rport_attrs[count]; \
820  count++; \
821 }
822 
823 
824 /* The FC Transport Remote Port Attributes: */
825 
826 /* Fixed Remote Port Attributes */
827 
828 fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
829 
830 static ssize_t
831 show_fc_rport_supported_classes (struct device *dev,
832  struct device_attribute *attr, char *buf)
833 {
834  struct fc_rport *rport = transport_class_to_rport(dev);
836  return snprintf(buf, 20, "unspecified\n");
837  return get_fc_cos_names(rport->supported_classes, buf);
838 }
839 static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
840  show_fc_rport_supported_classes, NULL);
841 
842 /* Dynamic Remote Port Attributes */
843 
844 /*
845  * dev_loss_tmo attribute
846  */
847 static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
848 {
849  char *cp;
850 
851  *val = simple_strtoul(buf, &cp, 0);
852  if ((*cp && (*cp != '\n')) || (*val < 0))
853  return -EINVAL;
854  /*
855  * Check for overflow; dev_loss_tmo is u32
856  */
857  if (*val > UINT_MAX)
858  return -EINVAL;
859 
860  return 0;
861 }
862 
863 static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
864  unsigned long val)
865 {
866  struct Scsi_Host *shost = rport_to_shost(rport);
867  struct fc_internal *i = to_fc_internal(shost->transportt);
868 
869  if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
870  (rport->port_state == FC_PORTSTATE_DELETED) ||
871  (rport->port_state == FC_PORTSTATE_NOTPRESENT))
872  return -EBUSY;
873  /*
874  * Check for overflow; dev_loss_tmo is u32
875  */
876  if (val > UINT_MAX)
877  return -EINVAL;
878 
879  /*
880  * If fast_io_fail is off we have to cap
881  * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
882  */
883  if (rport->fast_io_fail_tmo == -1 &&
884  val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
885  return -EINVAL;
886 
887  i->f->set_rport_dev_loss_tmo(rport, val);
888  return 0;
889 }
890 
891 fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
892 static ssize_t
893 store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
894  const char *buf, size_t count)
895 {
896  struct fc_rport *rport = transport_class_to_rport(dev);
897  unsigned long val;
898  int rc;
899 
900  rc = fc_str_to_dev_loss(buf, &val);
901  if (rc)
902  return rc;
903 
904  rc = fc_rport_set_dev_loss_tmo(rport, val);
905  if (rc)
906  return rc;
907  return count;
908 }
909 static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
910  show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
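/*
 * Editorial note (not part of the original source): writing this rport
 * attribute funnels through fc_rport_set_dev_loss_tmo() above, so (as a
 * hedged example, path illustrative):
 *
 *   echo 45 > /sys/class/fc_remote_ports/rport-2:0-0/dev_loss_tmo
 *
 * is rejected with -EINVAL if fast_io_fail_tmo is "off" and the value
 * exceeds SCSI_DEVICE_BLOCK_MAX_TIMEOUT, and with -EBUSY while the rport
 * is blocked, deleted, or not present.
 */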
911 
912 
913 /* Private Remote Port Attributes */
914 
915 fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
916 fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
917 fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
918 
919 static ssize_t
920 show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
921  char *buf)
922 {
923  struct fc_rport *rport = transport_class_to_rport(dev);
924 
925  /* identify any roles that are port_id specific */
926  if ((rport->port_id != -1) &&
927  (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
928  FC_WELLKNOWN_PORTID_MASK) {
929  switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
930  case FC_FPORT_PORTID:
931  return snprintf(buf, 30, "Fabric Port\n");
932  case FC_FABCTLR_PORTID:
933  return snprintf(buf, 30, "Fabric Controller\n");
934  case FC_DIRSRVR_PORTID:
935  return snprintf(buf, 30, "Directory Server\n");
936  case FC_TIMESRVR_PORTID:
937  return snprintf(buf, 30, "Time Server\n");
938  case FC_MGMTSRVR_PORTID:
939  return snprintf(buf, 30, "Management Server\n");
940  default:
941  return snprintf(buf, 30, "Unknown Fabric Entity\n");
942  }
943  } else {
944  if (rport->roles == FC_PORT_ROLE_UNKNOWN)
945  return snprintf(buf, 20, "unknown\n");
946  return get_fc_port_roles_names(rport->roles, buf);
947  }
948 }
949 static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
950  show_fc_rport_roles, NULL);
951 
952 fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
953 fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
954 
955 /*
956  * fast_io_fail_tmo attribute
957  */
958 static ssize_t
959 show_fc_rport_fast_io_fail_tmo (struct device *dev,
960  struct device_attribute *attr, char *buf)
961 {
962  struct fc_rport *rport = transport_class_to_rport(dev);
963 
964  if (rport->fast_io_fail_tmo == -1)
965  return snprintf(buf, 5, "off\n");
966  return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
967 }
968 
969 static ssize_t
970 store_fc_rport_fast_io_fail_tmo(struct device *dev,
971  struct device_attribute *attr, const char *buf,
972  size_t count)
973 {
974  int val;
975  char *cp;
976  struct fc_rport *rport = transport_class_to_rport(dev);
977 
978  if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
979  (rport->port_state == FC_PORTSTATE_DELETED) ||
980  (rport->port_state == FC_PORTSTATE_NOTPRESENT))
981  return -EBUSY;
982  if (strncmp(buf, "off", 3) == 0)
983  rport->fast_io_fail_tmo = -1;
984  else {
985  val = simple_strtoul(buf, &cp, 0);
986  if ((*cp && (*cp != '\n')) || (val < 0))
987  return -EINVAL;
988  /*
989  * Cap fast_io_fail by dev_loss_tmo or
990  * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
991  */
992  if ((val >= rport->dev_loss_tmo) ||
993  (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
994  return -EINVAL;
995 
996  rport->fast_io_fail_tmo = val;
997  }
998  return count;
999 }
1000 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
1001  show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
1002 
1003 
1004 /*
1005  * FC SCSI Target Attribute Management
1006  */
1007 
1008 /*
1009  * Note: in the target show function we recognize when the remote
1010  * port is in the hierarchy and do not allow the driver to get
1011  * involved in sysfs functions. The driver only gets involved if
1012  * it's the "old" style that doesn't use rports.
1013  */
1014 #define fc_starget_show_function(field, format_string, sz, cast) \
1015 static ssize_t \
1016 show_fc_starget_##field (struct device *dev, \
1017  struct device_attribute *attr, char *buf) \
1018 { \
1019  struct scsi_target *starget = transport_class_to_starget(dev); \
1020  struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
1021  struct fc_internal *i = to_fc_internal(shost->transportt); \
1022  struct fc_rport *rport = starget_to_rport(starget); \
1023  if (rport) \
1024  fc_starget_##field(starget) = rport->field; \
1025  else if (i->f->get_starget_##field) \
1026  i->f->get_starget_##field(starget); \
1027  return snprintf(buf, sz, format_string, \
1028  cast fc_starget_##field(starget)); \
1029 }
1030 
1031 #define fc_starget_rd_attr(field, format_string, sz) \
1032  fc_starget_show_function(field, format_string, sz, ) \
1033 static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
1034  show_fc_starget_##field, NULL)
1035 
1036 #define fc_starget_rd_attr_cast(field, format_string, sz, cast) \
1037  fc_starget_show_function(field, format_string, sz, (cast)) \
1038 static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
1039  show_fc_starget_##field, NULL)
1040 
1041 #define SETUP_STARGET_ATTRIBUTE_RD(field) \
1042  i->private_starget_attrs[count] = device_attr_starget_##field; \
1043  i->private_starget_attrs[count].attr.mode = S_IRUGO; \
1044  i->private_starget_attrs[count].store = NULL; \
1045  i->starget_attrs[count] = &i->private_starget_attrs[count]; \
1046  if (i->f->show_starget_##field) \
1047  count++
1048 
1049 #define SETUP_STARGET_ATTRIBUTE_RW(field) \
1050  i->private_starget_attrs[count] = device_attr_starget_##field; \
1051  if (!i->f->set_starget_##field) { \
1052  i->private_starget_attrs[count].attr.mode = S_IRUGO; \
1053  i->private_starget_attrs[count].store = NULL; \
1054  } \
1055  i->starget_attrs[count] = &i->private_starget_attrs[count]; \
1056  if (i->f->show_starget_##field) \
1057  count++
1058 
1059 /* The FC Transport SCSI Target Attributes: */
1060 fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1061 fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1062 fc_starget_rd_attr(port_id, "0x%06x\n", 20);
1063 
1064 
1065 /*
1066  * FC Virtual Port Attribute Management
1067  */
1068 
1069 #define fc_vport_show_function(field, format_string, sz, cast) \
1070 static ssize_t \
1071 show_fc_vport_##field (struct device *dev, \
1072  struct device_attribute *attr, char *buf) \
1073 { \
1074  struct fc_vport *vport = transport_class_to_vport(dev); \
1075  struct Scsi_Host *shost = vport_to_shost(vport); \
1076  struct fc_internal *i = to_fc_internal(shost->transportt); \
1077  if ((i->f->get_vport_##field) && \
1078  !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
1079  i->f->get_vport_##field(vport); \
1080  return snprintf(buf, sz, format_string, cast vport->field); \
1081 }
1082 
1083 #define fc_vport_store_function(field) \
1084 static ssize_t \
1085 store_fc_vport_##field(struct device *dev, \
1086  struct device_attribute *attr, \
1087  const char *buf, size_t count) \
1088 { \
1089  int val; \
1090  struct fc_vport *vport = transport_class_to_vport(dev); \
1091  struct Scsi_Host *shost = vport_to_shost(vport); \
1092  struct fc_internal *i = to_fc_internal(shost->transportt); \
1093  char *cp; \
1094  if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1095  return -EBUSY; \
1096  val = simple_strtoul(buf, &cp, 0); \
1097  if (*cp && (*cp != '\n')) \
1098  return -EINVAL; \
1099  i->f->set_vport_##field(vport, val); \
1100  return count; \
1101 }
1102 
1103 #define fc_vport_store_str_function(field, slen) \
1104 static ssize_t \
1105 store_fc_vport_##field(struct device *dev, \
1106  struct device_attribute *attr, \
1107  const char *buf, size_t count) \
1108 { \
1109  struct fc_vport *vport = transport_class_to_vport(dev); \
1110  struct Scsi_Host *shost = vport_to_shost(vport); \
1111  struct fc_internal *i = to_fc_internal(shost->transportt); \
1112  unsigned int cnt=count; \
1113  \
1114  /* count may include a LF at end of string */ \
1115  if (buf[cnt-1] == '\n') \
1116  cnt--; \
1117  if (cnt > ((slen) - 1)) \
1118  return -EINVAL; \
1119  memcpy(vport->field, buf, cnt); \
1120  i->f->set_vport_##field(vport); \
1121  return count; \
1122 }
1123 
1124 #define fc_vport_rd_attr(field, format_string, sz) \
1125  fc_vport_show_function(field, format_string, sz, ) \
1126 static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1127  show_fc_vport_##field, NULL)
1128 
1129 #define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
1130  fc_vport_show_function(field, format_string, sz, (cast)) \
1131 static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1132  show_fc_vport_##field, NULL)
1133 
1134 #define fc_vport_rw_attr(field, format_string, sz) \
1135  fc_vport_show_function(field, format_string, sz, ) \
1136  fc_vport_store_function(field) \
1137 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1138  show_fc_vport_##field, \
1139  store_fc_vport_##field)
1140 
1141 #define fc_private_vport_show_function(field, format_string, sz, cast) \
1142 static ssize_t \
1143 show_fc_vport_##field (struct device *dev, \
1144  struct device_attribute *attr, char *buf) \
1145 { \
1146  struct fc_vport *vport = transport_class_to_vport(dev); \
1147  return snprintf(buf, sz, format_string, cast vport->field); \
1148 }
1149 
1150 #define fc_private_vport_store_u32_function(field) \
1151 static ssize_t \
1152 store_fc_vport_##field(struct device *dev, \
1153  struct device_attribute *attr, \
1154  const char *buf, size_t count) \
1155 { \
1156  u32 val; \
1157  struct fc_vport *vport = transport_class_to_vport(dev); \
1158  char *cp; \
1159  if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1160  return -EBUSY; \
1161  val = simple_strtoul(buf, &cp, 0); \
1162  if (*cp && (*cp != '\n')) \
1163  return -EINVAL; \
1164  vport->field = val; \
1165  return count; \
1166 }
1167 
1168 
1169 #define fc_private_vport_rd_attr(field, format_string, sz) \
1170  fc_private_vport_show_function(field, format_string, sz, ) \
1171 static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1172  show_fc_vport_##field, NULL)
1173 
1174 #define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
1175  fc_private_vport_show_function(field, format_string, sz, (cast)) \
1176 static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1177  show_fc_vport_##field, NULL)
1178 
1179 #define fc_private_vport_rw_u32_attr(field, format_string, sz) \
1180  fc_private_vport_show_function(field, format_string, sz, ) \
1181  fc_private_vport_store_u32_function(field) \
1182 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1183  show_fc_vport_##field, \
1184  store_fc_vport_##field)
1185 
1186 
1187 #define fc_private_vport_rd_enum_attr(title, maxlen) \
1188 static ssize_t \
1189 show_fc_vport_##title (struct device *dev, \
1190  struct device_attribute *attr, \
1191  char *buf) \
1192 { \
1193  struct fc_vport *vport = transport_class_to_vport(dev); \
1194  const char *name; \
1195  name = get_fc_##title##_name(vport->title); \
1196  if (!name) \
1197  return -EINVAL; \
1198  return snprintf(buf, maxlen, "%s\n", name); \
1199 } \
1200 static FC_DEVICE_ATTR(vport, title, S_IRUGO, \
1201  show_fc_vport_##title, NULL)
1202 
1203 
1204 #define SETUP_VPORT_ATTRIBUTE_RD(field) \
1205  i->private_vport_attrs[count] = device_attr_vport_##field; \
1206  i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1207  i->private_vport_attrs[count].store = NULL; \
1208  i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1209  if (i->f->get_##field) \
1210  count++
1211  /* NOTE: Above MACRO differs: checks function not show bit */
1212 
1213 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
1214  i->private_vport_attrs[count] = device_attr_vport_##field; \
1215  i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1216  i->private_vport_attrs[count].store = NULL; \
1217  i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1218  count++
1219 
1220 #define SETUP_VPORT_ATTRIBUTE_WR(field) \
1221  i->private_vport_attrs[count] = device_attr_vport_##field; \
1222  i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1223  if (i->f->field) \
1224  count++
1225  /* NOTE: Above MACRO differs: checks function */
1226 
1227 #define SETUP_VPORT_ATTRIBUTE_RW(field) \
1228  i->private_vport_attrs[count] = device_attr_vport_##field; \
1229  if (!i->f->set_vport_##field) { \
1230  i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1231  i->private_vport_attrs[count].store = NULL; \
1232  } \
1233  i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1234  count++
1235  /* NOTE: Above MACRO differs: does not check show bit */
1236 
1237 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
1238 { \
1239  i->private_vport_attrs[count] = device_attr_vport_##field; \
1240  i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1241  count++; \
1242 }
1243 
1244 
1245 /* The FC Transport Virtual Port Attributes: */
1246 
1247 /* Fixed Virtual Port Attributes */
1248 
1249 /* Dynamic Virtual Port Attributes */
1250 
1251 /* Private Virtual Port Attributes */
1252 
1253 fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1254 fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1255 fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1256 fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1257 
1258 static ssize_t
1259 show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
1260  char *buf)
1261 {
1262  struct fc_vport *vport = transport_class_to_vport(dev);
1263 
1264  if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1265  return snprintf(buf, 20, "unknown\n");
1266  return get_fc_port_roles_names(vport->roles, buf);
1267 }
1268 static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1269 
1270 fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1271 
1272 fc_private_vport_show_function(symbolic_name, "%s\n",
1273  FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1274 fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1275 static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1276  show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1277 
1278 static ssize_t
1279 store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1280  const char *buf, size_t count)
1281 {
1282  struct fc_vport *vport = transport_class_to_vport(dev);
1283  struct Scsi_Host *shost = vport_to_shost(vport);
1284  unsigned long flags;
1285 
1286  spin_lock_irqsave(shost->host_lock, flags);
1287  if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
1288  spin_unlock_irqrestore(shost->host_lock, flags);
1289  return -EBUSY;
1290  }
1291  vport->flags |= FC_VPORT_DELETING;
1292  spin_unlock_irqrestore(shost->host_lock, flags);
1293 
1294  fc_queue_work(shost, &vport->vport_delete_work);
1295  return count;
1296 }
1297 static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1298  NULL, store_fc_vport_delete);
1299 
1300 
1301 /*
1302  * Enable/Disable vport
1303  * Write "1" to disable, write "0" to enable
1304  */
1305 static ssize_t
1306 store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
1307  const char *buf,
1308  size_t count)
1309 {
1310  struct fc_vport *vport = transport_class_to_vport(dev);
1311  struct Scsi_Host *shost = vport_to_shost(vport);
1312  struct fc_internal *i = to_fc_internal(shost->transportt);
1313  int stat;
1314 
1315  if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1316  return -EBUSY;
1317 
1318  if (*buf == '0') {
1319  if (vport->vport_state != FC_VPORT_DISABLED)
1320  return -EALREADY;
1321  } else if (*buf == '1') {
1322  if (vport->vport_state == FC_VPORT_DISABLED)
1323  return -EALREADY;
1324  } else
1325  return -EINVAL;
1326 
1327  stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1328  return stat ? stat : count;
1329 }
1330 static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1331  NULL, store_fc_vport_disable);
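/*
 * Editorial note (not part of the original source): per the comment above,
 * disabling and re-enabling a vport from user space looks like (path
 * illustrative):
 *
 *   echo 1 > /sys/class/fc_vports/vport-2:0-0/vport_disable    (disable)
 *   echo 0 > /sys/class/fc_vports/vport-2:0-0/vport_disable    (enable)
 *
 * The request is passed to the driver's vport_disable() handler.
 */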
1332 
1333 
1334 /*
1335  * Host Attribute Management
1336  */
1337 
1338 #define fc_host_show_function(field, format_string, sz, cast) \
1339 static ssize_t \
1340 show_fc_host_##field (struct device *dev, \
1341  struct device_attribute *attr, char *buf) \
1342 { \
1343  struct Scsi_Host *shost = transport_class_to_shost(dev); \
1344  struct fc_internal *i = to_fc_internal(shost->transportt); \
1345  if (i->f->get_host_##field) \
1346  i->f->get_host_##field(shost); \
1347  return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1348 }
1349 
1350 #define fc_host_store_function(field) \
1351 static ssize_t \
1352 store_fc_host_##field(struct device *dev, \
1353  struct device_attribute *attr, \
1354  const char *buf, size_t count) \
1355 { \
1356  int val; \
1357  struct Scsi_Host *shost = transport_class_to_shost(dev); \
1358  struct fc_internal *i = to_fc_internal(shost->transportt); \
1359  char *cp; \
1360  \
1361  val = simple_strtoul(buf, &cp, 0); \
1362  if (*cp && (*cp != '\n')) \
1363  return -EINVAL; \
1364  i->f->set_host_##field(shost, val); \
1365  return count; \
1366 }
1367 
1368 #define fc_host_store_str_function(field, slen) \
1369 static ssize_t \
1370 store_fc_host_##field(struct device *dev, \
1371  struct device_attribute *attr, \
1372  const char *buf, size_t count) \
1373 { \
1374  struct Scsi_Host *shost = transport_class_to_shost(dev); \
1375  struct fc_internal *i = to_fc_internal(shost->transportt); \
1376  unsigned int cnt=count; \
1377  \
1378  /* count may include a LF at end of string */ \
1379  if (buf[cnt-1] == '\n') \
1380  cnt--; \
1381  if (cnt > ((slen) - 1)) \
1382  return -EINVAL; \
1383  memcpy(fc_host_##field(shost), buf, cnt); \
1384  i->f->set_host_##field(shost); \
1385  return count; \
1386 }
1387 
1388 #define fc_host_rd_attr(field, format_string, sz) \
1389  fc_host_show_function(field, format_string, sz, ) \
1390 static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1391  show_fc_host_##field, NULL)
1392 
1393 #define fc_host_rd_attr_cast(field, format_string, sz, cast) \
1394  fc_host_show_function(field, format_string, sz, (cast)) \
1395 static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1396  show_fc_host_##field, NULL)
1397 
1398 #define fc_host_rw_attr(field, format_string, sz) \
1399  fc_host_show_function(field, format_string, sz, ) \
1400  fc_host_store_function(field) \
1401 static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \
1402  show_fc_host_##field, \
1403  store_fc_host_##field)
1404 
1405 #define fc_host_rd_enum_attr(title, maxlen) \
1406 static ssize_t \
1407 show_fc_host_##title (struct device *dev, \
1408  struct device_attribute *attr, char *buf) \
1409 { \
1410  struct Scsi_Host *shost = transport_class_to_shost(dev); \
1411  struct fc_internal *i = to_fc_internal(shost->transportt); \
1412  const char *name; \
1413  if (i->f->get_host_##title) \
1414  i->f->get_host_##title(shost); \
1415  name = get_fc_##title##_name(fc_host_##title(shost)); \
1416  if (!name) \
1417  return -EINVAL; \
1418  return snprintf(buf, maxlen, "%s\n", name); \
1419 } \
1420 static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1421 
1422 #define SETUP_HOST_ATTRIBUTE_RD(field) \
1423  i->private_host_attrs[count] = device_attr_host_##field; \
1424  i->private_host_attrs[count].attr.mode = S_IRUGO; \
1425  i->private_host_attrs[count].store = NULL; \
1426  i->host_attrs[count] = &i->private_host_attrs[count]; \
1427  if (i->f->show_host_##field) \
1428  count++
1429 
1430 #define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
1431  i->private_host_attrs[count] = device_attr_host_##field; \
1432  i->private_host_attrs[count].attr.mode = S_IRUGO; \
1433  i->private_host_attrs[count].store = NULL; \
1434  i->host_attrs[count] = &i->private_host_attrs[count]; \
1435  count++
1436 
1437 #define SETUP_HOST_ATTRIBUTE_RW(field) \
1438  i->private_host_attrs[count] = device_attr_host_##field; \
1439  if (!i->f->set_host_##field) { \
1440  i->private_host_attrs[count].attr.mode = S_IRUGO; \
1441  i->private_host_attrs[count].store = NULL; \
1442  } \
1443  i->host_attrs[count] = &i->private_host_attrs[count]; \
1444  if (i->f->show_host_##field) \
1445  count++
1446 
1447 
1448 #define fc_private_host_show_function(field, format_string, sz, cast) \
1449 static ssize_t \
1450 show_fc_host_##field (struct device *dev, \
1451  struct device_attribute *attr, char *buf) \
1452 { \
1453  struct Scsi_Host *shost = transport_class_to_shost(dev); \
1454  return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1455 }
1456 
1457 #define fc_private_host_rd_attr(field, format_string, sz) \
1458  fc_private_host_show_function(field, format_string, sz, ) \
1459 static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1460  show_fc_host_##field, NULL)
1461 
1462 #define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \
1463  fc_private_host_show_function(field, format_string, sz, (cast)) \
1464 static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1465  show_fc_host_##field, NULL)
1466 
1467 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \
1468  i->private_host_attrs[count] = device_attr_host_##field; \
1469  i->private_host_attrs[count].attr.mode = S_IRUGO; \
1470  i->private_host_attrs[count].store = NULL; \
1471  i->host_attrs[count] = &i->private_host_attrs[count]; \
1472  count++
1473 
1474 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \
1475 { \
1476  i->private_host_attrs[count] = device_attr_host_##field; \
1477  i->host_attrs[count] = &i->private_host_attrs[count]; \
1478  count++; \
1479 }
1480 
1481 
1482 /* Fixed Host Attributes */
1483 
1484 static ssize_t
1485 show_fc_host_supported_classes (struct device *dev,
1486  struct device_attribute *attr, char *buf)
1487 {
1488  struct Scsi_Host *shost = transport_class_to_shost(dev);
1489 
1491  return snprintf(buf, 20, "unspecified\n");
1492 
1493  return get_fc_cos_names(fc_host_supported_classes(shost), buf);
1494 }
1495 static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
1496  show_fc_host_supported_classes, NULL);
1497 
1498 static ssize_t
1499 show_fc_host_supported_fc4s (struct device *dev,
1500  struct device_attribute *attr, char *buf)
1501 {
1502  struct Scsi_Host *shost = transport_class_to_shost(dev);
1503  return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
1504 }
1505 static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
1506  show_fc_host_supported_fc4s, NULL);
1507 
1508 static ssize_t
1509 show_fc_host_supported_speeds (struct device *dev,
1510  struct device_attribute *attr, char *buf)
1511 {
1512  struct Scsi_Host *shost = transport_class_to_shost(dev);
1513 
1515  return snprintf(buf, 20, "unknown\n");
1516 
1517  return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
1518 }
1519 static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
1520  show_fc_host_supported_speeds, NULL);
1521 
1522 
1523 fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1524 fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1525 fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1526  unsigned long long);
1527 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1528 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1529 fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1530 fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
1531 fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1532 fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1533 fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1534 fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1535 fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1536 fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1537 
1538 
1539 /* Dynamic Host Attributes */
1540 
1541 static ssize_t
1542 show_fc_host_active_fc4s (struct device *dev,
1543  struct device_attribute *attr, char *buf)
1544 {
1545  struct Scsi_Host *shost = transport_class_to_shost(dev);
1546  struct fc_internal *i = to_fc_internal(shost->transportt);
1547 
1548  if (i->f->get_host_active_fc4s)
1549  i->f->get_host_active_fc4s(shost);
1550 
1551  return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
1552 }
1553 static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
1554  show_fc_host_active_fc4s, NULL);
1555 
1556 static ssize_t
1557 show_fc_host_speed (struct device *dev,
1558  struct device_attribute *attr, char *buf)
1559 {
1560  struct Scsi_Host *shost = transport_class_to_shost(dev);
1561  struct fc_internal *i = to_fc_internal(shost->transportt);
1562 
1563  if (i->f->get_host_speed)
1564  i->f->get_host_speed(shost);
1565 
1566  if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
1567  return snprintf(buf, 20, "unknown\n");
1568 
1569  return get_fc_port_speed_names(fc_host_speed(shost), buf);
1570 }
1571 static FC_DEVICE_ATTR(host, speed, S_IRUGO,
1572  show_fc_host_speed, NULL);
1573 
1574 
1575 fc_host_rd_attr(port_id, "0x%06x\n", 20);
1576 fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
1577 fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
1578 fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1579 fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1580 
1581 fc_private_host_show_function(system_hostname, "%s\n",
1582  FC_SYMBOLIC_NAME_SIZE + 1, )
1583 fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1584 static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1585  show_fc_host_system_hostname, store_fc_host_system_hostname);
1586 
1587 
1588 /* Private Host Attributes */
1589 
1590 static ssize_t
1591 show_fc_private_host_tgtid_bind_type(struct device *dev,
1592  struct device_attribute *attr, char *buf)
1593 {
1594  struct Scsi_Host *shost = transport_class_to_shost(dev);
1595  const char *name;
1596 
1597  name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
1598  if (!name)
1599  return -EINVAL;
1600  return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
1601 }
1602 
1603 #define get_list_head_entry(pos, head, member) \
1604  pos = list_entry((head)->next, typeof(*pos), member)
1605 
1606 static ssize_t
1607 store_fc_private_host_tgtid_bind_type(struct device *dev,
1608  struct device_attribute *attr, const char *buf, size_t count)
1609 {
1610  struct Scsi_Host *shost = transport_class_to_shost(dev);
1611  struct fc_rport *rport;
1612  enum fc_tgtid_binding_type val;
1613  unsigned long flags;
1614 
1615  if (get_fc_tgtid_bind_type_match(buf, &val))
1616  return -EINVAL;
1617 
1618  /* if changing bind type, purge all unused consistent bindings */
1619  if (val != fc_host_tgtid_bind_type(shost)) {
1620  spin_lock_irqsave(shost->host_lock, flags);
1621  while (!list_empty(&fc_host_rport_bindings(shost))) {
1622  get_list_head_entry(rport,
1623  &fc_host_rport_bindings(shost), peers);
1624  list_del(&rport->peers);
1625  rport->port_state = FC_PORTSTATE_DELETED;
1626  fc_queue_work(shost, &rport->rport_delete_work);
1627  }
1628  spin_unlock_irqrestore(shost->host_lock, flags);
1629  }
1630 
1631  fc_host_tgtid_bind_type(shost) = val;
1632  return count;
1633 }
1634 
1635 static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
1636  show_fc_private_host_tgtid_bind_type,
1637  store_fc_private_host_tgtid_bind_type);
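/*
 * Editorial note (not part of the original source): the store routine above
 * matches only the prefix length from fc_tgtid_binding_names, so (path
 * illustrative):
 *
 *   echo wwpn > /sys/class/fc_host/host2/tgtid_bind_type
 *
 * selects FC_TGTID_BIND_BY_WWPN and purges any unused consistent bindings
 * accumulated under the previous binding type.
 */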
1638 
1639 static ssize_t
1640 store_fc_private_host_issue_lip(struct device *dev,
1641  struct device_attribute *attr, const char *buf, size_t count)
1642 {
1643  struct Scsi_Host *shost = transport_class_to_shost(dev);
1644  struct fc_internal *i = to_fc_internal(shost->transportt);
1645  int ret;
1646 
1647  /* ignore any data value written to the attribute */
1648  if (i->f->issue_fc_host_lip) {
1649  ret = i->f->issue_fc_host_lip(shost);
1650  return ret ? ret: count;
1651  }
1652 
1653  return -ENOENT;
1654 }
1655 
1656 static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1657  store_fc_private_host_issue_lip);
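/*
 * Editorial note (not part of the original source): any write triggers the
 * LIP, e.g. (path illustrative):
 *
 *   echo 1 > /sys/class/fc_host/host2/issue_lip
 *
 * which invokes the driver's issue_fc_host_lip() callback when provided and
 * returns -ENOENT otherwise.
 */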
1658 
1659 static ssize_t
1660 store_fc_private_host_dev_loss_tmo(struct device *dev,
1661  struct device_attribute *attr,
1662  const char *buf, size_t count)
1663 {
1664  struct Scsi_Host *shost = transport_class_to_shost(dev);
1665  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1666  struct fc_rport *rport;
1667  unsigned long val, flags;
1668  int rc;
1669 
1670  rc = fc_str_to_dev_loss(buf, &val);
1671  if (rc)
1672  return rc;
1673 
1674  fc_host_dev_loss_tmo(shost) = val;
1675  spin_lock_irqsave(shost->host_lock, flags);
1676  list_for_each_entry(rport, &fc_host->rports, peers)
1677  fc_rport_set_dev_loss_tmo(rport, val);
1678  spin_unlock_irqrestore(shost->host_lock, flags);
1679  return count;
1680 }
1681 
1682 fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, )
1683 static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
1684  show_fc_host_dev_loss_tmo,
1685  store_fc_private_host_dev_loss_tmo);
1686 
1687 fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1688 
1689 /*
1690  * Host Statistics Management
1691  */
1692 
1693 /* Show a given attribute in the statistics group */
1694 static ssize_t
1695 fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
1696 {
1697  struct Scsi_Host *shost = transport_class_to_shost(dev);
1698  struct fc_internal *i = to_fc_internal(shost->transportt);
1699  struct fc_host_statistics *stats;
1700  ssize_t ret = -ENOENT;
1701 
1702  if (offset > sizeof(struct fc_host_statistics) ||
1703  offset % sizeof(u64) != 0)
1704  WARN_ON(1);
1705 
1706  if (i->f->get_fc_host_stats) {
1707  stats = (i->f->get_fc_host_stats)(shost);
1708  if (stats)
1709  ret = snprintf(buf, 20, "0x%llx\n",
1710  (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
1711  }
1712  return ret;
1713 }
1714 
1715 
1716 /* generate a read-only statistics attribute */
1717 #define fc_host_statistic(name) \
1718 static ssize_t show_fcstat_##name(struct device *cd, \
1719  struct device_attribute *attr, \
1720  char *buf) \
1721 { \
1722  return fc_stat_show(cd, buf, \
1723  offsetof(struct fc_host_statistics, name)); \
1724 } \
1725 static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
1726 
1727 fc_host_statistic(seconds_since_last_reset);
1728 fc_host_statistic(tx_frames);
1729 fc_host_statistic(tx_words);
1730 fc_host_statistic(rx_frames);
1731 fc_host_statistic(rx_words);
1732 fc_host_statistic(lip_count);
1733 fc_host_statistic(nos_count);
1734 fc_host_statistic(error_frames);
1735 fc_host_statistic(dumped_frames);
1736 fc_host_statistic(link_failure_count);
1737 fc_host_statistic(loss_of_sync_count);
1738 fc_host_statistic(loss_of_signal_count);
1739 fc_host_statistic(prim_seq_protocol_err_count);
1740 fc_host_statistic(invalid_tx_word_count);
1741 fc_host_statistic(invalid_crc_count);
1742 fc_host_statistic(fcp_input_requests);
1743 fc_host_statistic(fcp_output_requests);
1744 fc_host_statistic(fcp_control_requests);
1745 fc_host_statistic(fcp_input_megabytes);
1746 fc_host_statistic(fcp_output_megabytes);
1747 fc_host_statistic(fcp_packet_alloc_failures);
1748 fc_host_statistic(fcp_packet_aborts);
1749 fc_host_statistic(fcp_frame_alloc_failures);
1750 fc_host_statistic(fc_no_free_exch);
1751 fc_host_statistic(fc_no_free_exch_xid);
1752 fc_host_statistic(fc_xid_not_found);
1753 fc_host_statistic(fc_xid_busy);
1754 fc_host_statistic(fc_seq_not_found);
1755 fc_host_statistic(fc_non_bls_resp);
1756 
1757 static ssize_t
1758 fc_reset_statistics(struct device *dev, struct device_attribute *attr,
1759  const char *buf, size_t count)
1760 {
1761  struct Scsi_Host *shost = transport_class_to_shost(dev);
1762  struct fc_internal *i = to_fc_internal(shost->transportt);
1763 
1764  /* ignore any data value written to the attribute */
1765  if (i->f->reset_fc_host_stats) {
1766  i->f->reset_fc_host_stats(shost);
1767  return count;
1768  }
1769 
1770  return -ENOENT;
1771 }
1772 static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
1773  fc_reset_statistics);
1774 
1775 static struct attribute *fc_statistics_attrs[] = {
1776  &device_attr_host_seconds_since_last_reset.attr,
1777  &device_attr_host_tx_frames.attr,
1778  &device_attr_host_tx_words.attr,
1779  &device_attr_host_rx_frames.attr,
1780  &device_attr_host_rx_words.attr,
1781  &device_attr_host_lip_count.attr,
1782  &device_attr_host_nos_count.attr,
1783  &device_attr_host_error_frames.attr,
1784  &device_attr_host_dumped_frames.attr,
1785  &device_attr_host_link_failure_count.attr,
1786  &device_attr_host_loss_of_sync_count.attr,
1787  &device_attr_host_loss_of_signal_count.attr,
1788  &device_attr_host_prim_seq_protocol_err_count.attr,
1789  &device_attr_host_invalid_tx_word_count.attr,
1790  &device_attr_host_invalid_crc_count.attr,
1791  &device_attr_host_fcp_input_requests.attr,
1792  &device_attr_host_fcp_output_requests.attr,
1793  &device_attr_host_fcp_control_requests.attr,
1794  &device_attr_host_fcp_input_megabytes.attr,
1795  &device_attr_host_fcp_output_megabytes.attr,
1796  &device_attr_host_fcp_packet_alloc_failures.attr,
1797  &device_attr_host_fcp_packet_aborts.attr,
1798  &device_attr_host_fcp_frame_alloc_failures.attr,
1799  &device_attr_host_fc_no_free_exch.attr,
1800  &device_attr_host_fc_no_free_exch_xid.attr,
1801  &device_attr_host_fc_xid_not_found.attr,
1802  &device_attr_host_fc_xid_busy.attr,
1803  &device_attr_host_fc_seq_not_found.attr,
1804  &device_attr_host_fc_non_bls_resp.attr,
1805  &device_attr_host_reset_statistics.attr,
1806  NULL
1807 };
1808 
1809 static struct attribute_group fc_statistics_group = {
1810  .name = "statistics",
1811  .attrs = fc_statistics_attrs,
1812 };
1813 
1814 
1815 /* Host Vport Attributes */
1816 
1817 static int
1818 fc_parse_wwn(const char *ns, u64 *nm)
1819 {
1820  unsigned int i, j;
1821  u8 wwn[8];
1822 
1823  memset(wwn, 0, sizeof(wwn));
1824 
1825  /* Validate and store the new name */
1826  for (i=0, j=0; i < 16; i++) {
1827  int value;
1828 
1829  value = hex_to_bin(*ns++);
1830  if (value >= 0)
1831  j = (j << 4) | value;
1832  else
1833  return -EINVAL;
1834  if (i % 2) {
1835  wwn[i/2] = j & 0xff;
1836  j = 0;
1837  }
1838  }
1839 
1840  *nm = wwn_to_u64(wwn);
1841 
1842  return 0;
1843 }
1844 
1845 
1846 /*
1847  * "Short-cut" sysfs variable to create a new vport on a FC Host.
1848  * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
1849  * will default to a NPIV-based FCP_Initiator; The WWNs are specified
1850  * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
1851  */
1852 static ssize_t
1853 store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1854  const char *buf, size_t count)
1855 {
1856  struct Scsi_Host *shost = transport_class_to_shost(dev);
1857  struct fc_vport_identifiers vid;
1858  struct fc_vport *vport;
1859  unsigned int cnt=count;
1860  int stat;
1861 
1862  memset(&vid, 0, sizeof(vid));
1863 
1864  /* count may include a LF at end of string */
1865  if (buf[cnt-1] == '\n')
1866  cnt--;
1867 
1868  /* validate we have enough characters for WWPN */
1869  if ((cnt != (16+1+16)) || (buf[16] != ':'))
1870  return -EINVAL;
1871 
1872  stat = fc_parse_wwn(&buf[0], &vid.port_name);
1873  if (stat)
1874  return stat;
1875 
1876  stat = fc_parse_wwn(&buf[17], &vid.node_name);
1877  if (stat)
1878  return stat;
1879 
1881  vid.vport_type = FC_PORTTYPE_NPIV;
1882  /* vid.symbolic_name is already zero/NULL's */
1883  vid.disable = false; /* always enabled */
1884 
1885  /* we only allow support on Channel 0 !!! */
1886  stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1887  return stat ? stat : count;
1888 }
1889 static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
1890  store_fc_host_vport_create);
1891 
1892 
1893 /*
1894  * "Short-cut" sysfs variable to delete a vport on a FC Host.
1895  * Vport is identified by a string containing "<WWPN>:<WWNN>".
1896  * The WWNs are specified as hex characters, and may *not* contain
1897  * any prefixes (e.g. 0x, x, etc)
1898  */
1899 static ssize_t
1900 store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
1901  const char *buf, size_t count)
1902 {
1903  struct Scsi_Host *shost = transport_class_to_shost(dev);
1904  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1905  struct fc_vport *vport;
1906  u64 wwpn, wwnn;
1907  unsigned long flags;
1908  unsigned int cnt=count;
1909  int stat, match;
1910 
1911  /* count may include a LF at end of string */
1912  if (buf[cnt-1] == '\n')
1913  cnt--;
1914 
1915  /* validate we have enough characters for WWPN */
1916  if ((cnt != (16+1+16)) || (buf[16] != ':'))
1917  return -EINVAL;
1918 
1919  stat = fc_parse_wwn(&buf[0], &wwpn);
1920  if (stat)
1921  return stat;
1922 
1923  stat = fc_parse_wwn(&buf[17], &wwnn);
1924  if (stat)
1925  return stat;
1926 
1927  spin_lock_irqsave(shost->host_lock, flags);
1928  match = 0;
1929  /* we only allow support on Channel 0 !!! */
1930  list_for_each_entry(vport, &fc_host->vports, peers) {
1931  if ((vport->channel == 0) &&
1932  (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1933  if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1934  break;
1935  vport->flags |= FC_VPORT_DELETING;
1936  match = 1;
1937  break;
1938  }
1939  }
1940  spin_unlock_irqrestore(shost->host_lock, flags);
1941 
1942  if (!match)
1943  return -ENODEV;
1944 
1945  stat = fc_vport_terminate(vport);
1946  return stat ? stat : count;
1947 }
1948 static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
1949  store_fc_host_vport_delete);
1950 
1951 
1952 static int fc_host_match(struct attribute_container *cont,
1953  struct device *dev)
1954 {
1955  struct Scsi_Host *shost;
1956  struct fc_internal *i;
1957 
1958  if (!scsi_is_host_device(dev))
1959  return 0;
1960 
1961  shost = dev_to_shost(dev);
1962  if (!shost->transportt || shost->transportt->host_attrs.ac.class
1963  != &fc_host_class.class)
1964  return 0;
1965 
1966  i = to_fc_internal(shost->transportt);
1967 
1968  return &i->t.host_attrs.ac == cont;
1969 }
1970 
1971 static int fc_target_match(struct attribute_container *cont,
1972  struct device *dev)
1973 {
1974  struct Scsi_Host *shost;
1975  struct fc_internal *i;
1976 
1977  if (!scsi_is_target_device(dev))
1978  return 0;
1979 
1980  shost = dev_to_shost(dev->parent);
1981  if (!shost->transportt || shost->transportt->host_attrs.ac.class
1982  != &fc_host_class.class)
1983  return 0;
1984 
1985  i = to_fc_internal(shost->transportt);
1986 
1987  return &i->t.target_attrs.ac == cont;
1988 }
1989 
1990 static void fc_rport_dev_release(struct device *dev)
1991 {
1992  struct fc_rport *rport = dev_to_rport(dev);
1993  put_device(dev->parent);
1994  kfree(rport);
1995 }
1996 
1997 int scsi_is_fc_rport(const struct device *dev)
1998 {
1999  return dev->release == fc_rport_dev_release;
2000 }
2002 
2003 static int fc_rport_match(struct attribute_container *cont,
2004  struct device *dev)
2005 {
2006  struct Scsi_Host *shost;
2007  struct fc_internal *i;
2008 
2009  if (!scsi_is_fc_rport(dev))
2010  return 0;
2011 
2012  shost = dev_to_shost(dev->parent);
2013  if (!shost->transportt || shost->transportt->host_attrs.ac.class
2014  != &fc_host_class.class)
2015  return 0;
2016 
2017  i = to_fc_internal(shost->transportt);
2018 
2019  return &i->rport_attr_cont.ac == cont;
2020 }
2021 
2022 
2023 static void fc_vport_dev_release(struct device *dev)
2024 {
2025  struct fc_vport *vport = dev_to_vport(dev);
2026  put_device(dev->parent); /* release kobj parent */
2027  kfree(vport);
2028 }
2029 
2030 int scsi_is_fc_vport(const struct device *dev)
2031 {
2032  return dev->release == fc_vport_dev_release;
2033 }
2035 
2036 static int fc_vport_match(struct attribute_container *cont,
2037  struct device *dev)
2038 {
2039  struct fc_vport *vport;
2040  struct Scsi_Host *shost;
2041  struct fc_internal *i;
2042 
2043  if (!scsi_is_fc_vport(dev))
2044  return 0;
2045  vport = dev_to_vport(dev);
2046 
2047  shost = vport_to_shost(vport);
2048  if (!shost->transportt || shost->transportt->host_attrs.ac.class
2049  != &fc_host_class.class)
2050  return 0;
2051 
2052  i = to_fc_internal(shost->transportt);
2053  return &i->vport_attr_cont.ac == cont;
2054 }
2055 
2056 
2079 static enum blk_eh_timer_return
2080 fc_timed_out(struct scsi_cmnd *scmd)
2081 {
2082  struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
2083 
2084  if (rport->port_state == FC_PORTSTATE_BLOCKED)
2085  return BLK_EH_RESET_TIMER;
2086 
2087  return BLK_EH_NOT_HANDLED;
2088 }
2089 
2090 /*
2091  * Called by fc_user_scan to locate an rport on the shost that
2092  * matches the channel and target id, and invoke scsi_scan_target()
2093  * on the rport.
2094  */
2095 static void
2096 fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
2097 {
2098  struct fc_rport *rport;
2099  unsigned long flags;
2100 
2101  spin_lock_irqsave(shost->host_lock, flags);
2102 
2103  list_for_each_entry(rport, &fc_host_rports(shost), peers) {
2104  if (rport->scsi_target_id == -1)
2105  continue;
2106 
2107  if (rport->port_state != FC_PORTSTATE_ONLINE)
2108  continue;
2109 
2110  if ((channel == rport->channel) &&
2111  (id == rport->scsi_target_id)) {
2112  spin_unlock_irqrestore(shost->host_lock, flags);
2113  scsi_scan_target(&rport->dev, channel, id, lun, 1);
2114  return;
2115  }
2116  }
2117 
2118  spin_unlock_irqrestore(shost->host_lock, flags);
2119 }
2120 
2121 /*
2122  * Called via sysfs scan routines. Necessary, as the FC transport
2123  * wants to place all target objects below the rport object. So this
2124  * routine must invoke the scsi_scan_target() routine with the rport
2125  * object as the parent.
2126  */
2127 static int
2128 fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
2129 {
2130  uint chlo, chhi;
2131  uint tgtlo, tgthi;
2132 
2133  if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
2134  ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
2135  ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
2136  return -EINVAL;
2137 
2138  if (channel == SCAN_WILD_CARD) {
2139  chlo = 0;
2140  chhi = shost->max_channel + 1;
2141  } else {
2142  chlo = channel;
2143  chhi = channel + 1;
2144  }
2145 
2146  if (id == SCAN_WILD_CARD) {
2147  tgtlo = 0;
2148  tgthi = shost->max_id;
2149  } else {
2150  tgtlo = id;
2151  tgthi = id + 1;
2152  }
2153 
2154  for ( ; chlo < chhi; chlo++)
2155  for ( ; tgtlo < tgthi; tgtlo++)
2156  fc_user_scan_tgt(shost, chlo, tgtlo, lun);
2157 
2158  return 0;
2159 }
2160 
2161 static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
2162  int result)
2163 {
2164  struct fc_internal *i = to_fc_internal(shost->transportt);
2165  return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
2166 }
2167 
2168 static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
2169 {
2170  struct fc_internal *i = to_fc_internal(shost->transportt);
2171  return i->f->it_nexus_response(shost, nexus, result);
2172 }
2173 
2174 struct scsi_transport_template *
2176 {
2177  int count;
2178  struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
2179  GFP_KERNEL);
2180 
2181  if (unlikely(!i))
2182  return NULL;
2183 
2184  i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
2185  i->t.target_attrs.ac.class = &fc_transport_class.class;
2186  i->t.target_attrs.ac.match = fc_target_match;
2187  i->t.target_size = sizeof(struct fc_starget_attrs);
2188  transport_container_register(&i->t.target_attrs);
2189 
2190  i->t.host_attrs.ac.attrs = &i->host_attrs[0];
2191  i->t.host_attrs.ac.class = &fc_host_class.class;
2192  i->t.host_attrs.ac.match = fc_host_match;
2193  i->t.host_size = sizeof(struct fc_host_attrs);
2194  if (ft->get_fc_host_stats)
2195  i->t.host_attrs.statistics = &fc_statistics_group;
2196  transport_container_register(&i->t.host_attrs);
2197 
2198  i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
2199  i->rport_attr_cont.ac.class = &fc_rport_class.class;
2200  i->rport_attr_cont.ac.match = fc_rport_match;
2201  transport_container_register(&i->rport_attr_cont);
2202 
2203  i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
2204  i->vport_attr_cont.ac.class = &fc_vport_class.class;
2205  i->vport_attr_cont.ac.match = fc_vport_match;
2206  transport_container_register(&i->vport_attr_cont);
2207 
2208  i->f = ft;
2209 
2210  /* Transport uses the shost workq for scsi scanning */
2211  i->t.create_work_queue = 1;
2212 
2213  i->t.eh_timed_out = fc_timed_out;
2214 
2215  i->t.user_scan = fc_user_scan;
2216 
2217  /* target-mode drivers' functions */
2218  i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
2219  i->t.it_nexus_response = fc_it_nexus_response;
2220 
2221  /*
2222  * Setup SCSI Target Attributes.
2223  */
2224  count = 0;
2228 
2229  BUG_ON(count > FC_STARGET_NUM_ATTRS);
2230 
2231  i->starget_attrs[count] = NULL;
2232 
2233 
2234  /*
2235  * Setup SCSI Host Attributes.
2236  */
2237  count=0;
2245  if (ft->vport_create) {
2247  SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2248  }
2257 
2264  SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
2265  SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2266 
2267  /* Transport-managed attributes */
2268  SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
2269  SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2270  if (ft->issue_fc_host_lip)
2272  if (ft->vport_create)
2273  SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2274  if (ft->vport_delete)
2275  SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
2276 
2277  BUG_ON(count > FC_HOST_NUM_ATTRS);
2278 
2279  i->host_attrs[count] = NULL;
2280 
2281  /*
2282  * Setup Remote Port Attributes.
2283  */
2284  count=0;
2287  SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
2295 
2296  BUG_ON(count > FC_RPORT_NUM_ATTRS);
2297 
2298  i->rport_attrs[count] = NULL;
2299 
2300  /*
2301  * Setup Virtual Port Attributes.
2302  */
2303  count=0;
2310  SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2311  SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2312  SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2313 
2314  BUG_ON(count > FC_VPORT_NUM_ATTRS);
2315 
2316  i->vport_attrs[count] = NULL;
2317 
2318  return &i->t;
2319 }
2321 
2323 {
2324  struct fc_internal *i = to_fc_internal(t);
2325 
2326  transport_container_unregister(&i->t.target_attrs);
2327  transport_container_unregister(&i->t.host_attrs);
2328  transport_container_unregister(&i->rport_attr_cont);
2329  transport_container_unregister(&i->vport_attr_cont);
2330 
2331  kfree(i);
2332 }
2334 
2345 static int
2346 fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2347 {
2348  if (unlikely(!fc_host_work_q(shost))) {
2350  "ERROR: FC host '%s' attempted to queue work, "
2351  "when no workqueue created.\n", shost->hostt->name);
2352  dump_stack();
2353 
2354  return -EINVAL;
2355  }
2356 
2357  return queue_work(fc_host_work_q(shost), work);
2358 }
2359 
2364 static void
2365 fc_flush_work(struct Scsi_Host *shost)
2366 {
2367  if (!fc_host_work_q(shost)) {
2369  "ERROR: FC host '%s' attempted to flush work, "
2370  "when no workqueue created.\n", shost->hostt->name);
2371  dump_stack();
2372  return;
2373  }
2374 
2376 }
2377 
2387 static int
2388 fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2389  unsigned long delay)
2390 {
2391  if (unlikely(!fc_host_devloss_work_q(shost))) {
2393  "ERROR: FC host '%s' attempted to queue work, "
2394  "when no workqueue created.\n", shost->hostt->name);
2395  dump_stack();
2396 
2397  return -EINVAL;
2398  }
2399 
2400  return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
2401 }
2402 
2407 static void
2408 fc_flush_devloss(struct Scsi_Host *shost)
2409 {
2410  if (!fc_host_devloss_work_q(shost)) {
2412  "ERROR: FC host '%s' attempted to flush work, "
2413  "when no workqueue created.\n", shost->hostt->name);
2414  dump_stack();
2415  return;
2416  }
2417 
2419 }
2420 
2421 
2437 void
2439 {
2440  struct fc_vport *vport = NULL, *next_vport = NULL;
2441  struct fc_rport *rport = NULL, *next_rport = NULL;
2442  struct workqueue_struct *work_q;
2443  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2444  unsigned long flags;
2445 
2446  spin_lock_irqsave(shost->host_lock, flags);
2447 
2448  /* Remove any vports */
2449  list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
2450  fc_queue_work(shost, &vport->vport_delete_work);
2451 
2452  /* Remove any remote ports */
2453  list_for_each_entry_safe(rport, next_rport,
2454  &fc_host->rports, peers) {
2455  list_del(&rport->peers);
2457  fc_queue_work(shost, &rport->rport_delete_work);
2458  }
2459 
2460  list_for_each_entry_safe(rport, next_rport,
2461  &fc_host->rport_bindings, peers) {
2462  list_del(&rport->peers);
2464  fc_queue_work(shost, &rport->rport_delete_work);
2465  }
2466 
2467  spin_unlock_irqrestore(shost->host_lock, flags);
2468 
2469  /* flush all scan work items */
2470  scsi_flush_work(shost);
2471 
2472  /* flush all stgt delete, and rport delete work items, then kill it */
2473  if (fc_host->work_q) {
2474  work_q = fc_host->work_q;
2475  fc_host->work_q = NULL;
2476  destroy_workqueue(work_q);
2477  }
2478 
2479  /* flush all devloss work items, then kill it */
2480  if (fc_host->devloss_work_q) {
2481  work_q = fc_host->devloss_work_q;
2482  fc_host->devloss_work_q = NULL;
2483  destroy_workqueue(work_q);
2484  }
2485 }
2487 
2488 static void fc_terminate_rport_io(struct fc_rport *rport)
2489 {
2490  struct Scsi_Host *shost = rport_to_shost(rport);
2491  struct fc_internal *i = to_fc_internal(shost->transportt);
2492 
2493  /* Involve the LLDD if possible to terminate all io on the rport. */
2494  if (i->f->terminate_rport_io)
2495  i->f->terminate_rport_io(rport);
2496 
2497  /*
2498  * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
2499  */
2501 }
2502 
2509 static void
2510 fc_starget_delete(struct work_struct *work)
2511 {
2512  struct fc_rport *rport =
2513  container_of(work, struct fc_rport, stgt_delete_work);
2514 
2515  fc_terminate_rport_io(rport);
2516  scsi_remove_target(&rport->dev);
2517 }
2518 
2519 
2524 static void
2525 fc_rport_final_delete(struct work_struct *work)
2526 {
2527  struct fc_rport *rport =
2528  container_of(work, struct fc_rport, rport_delete_work);
2529  struct device *dev = &rport->dev;
2530  struct Scsi_Host *shost = rport_to_shost(rport);
2531  struct fc_internal *i = to_fc_internal(shost->transportt);
2532  unsigned long flags;
2533  int do_callback = 0;
2534 
2535  fc_terminate_rport_io(rport);
2536 
2537  /*
2538  * if a scan is pending, flush the SCSI Host work_q so that
2539  * that we can reclaim the rport scan work element.
2540  */
2541  if (rport->flags & FC_RPORT_SCAN_PENDING)
2542  scsi_flush_work(shost);
2543 
2544  /*
2545  * Cancel any outstanding timers. These should really exist
2546  * only when rmmod'ing the LLDD and we're asking for
2547  * immediate termination of the rports
2548  */
2549  spin_lock_irqsave(shost->host_lock, flags);
2550  if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2551  spin_unlock_irqrestore(shost->host_lock, flags);
2552  if (!cancel_delayed_work(&rport->fail_io_work))
2553  fc_flush_devloss(shost);
2554  if (!cancel_delayed_work(&rport->dev_loss_work))
2555  fc_flush_devloss(shost);
2556  spin_lock_irqsave(shost->host_lock, flags);
2557  rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2558  }
2559  spin_unlock_irqrestore(shost->host_lock, flags);
2560 
2561  /* Delete SCSI target and sdevs */
2562  if (rport->scsi_target_id != -1)
2563  fc_starget_delete(&rport->stgt_delete_work);
2564 
2565  /*
2566  * Notify the driver that the rport is now dead. The LLDD will
2567  * also guarantee that any communication to the rport is terminated
2568  *
2569  * Avoid this call if we already called it when we preserved the
2570  * rport for the binding.
2571  */
2572  spin_lock_irqsave(shost->host_lock, flags);
2573  if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2574  (i->f->dev_loss_tmo_callbk)) {
2576  do_callback = 1;
2577  }
2578  spin_unlock_irqrestore(shost->host_lock, flags);
2579 
2580  if (do_callback)
2581  i->f->dev_loss_tmo_callbk(rport);
2582 
2583  fc_bsg_remove(rport->rqst_q);
2584 
2586  device_del(dev);
2588  put_device(&shost->shost_gendev); /* for fc_host->rport list */
2589  put_device(dev); /* for self-reference */
2590 }
2591 
2592 
2606 static struct fc_rport *
2607 fc_rport_create(struct Scsi_Host *shost, int channel,
2608  struct fc_rport_identifiers *ids)
2609 {
2610  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2611  struct fc_internal *fci = to_fc_internal(shost->transportt);
2612  struct fc_rport *rport;
2613  struct device *dev;
2614  unsigned long flags;
2615  int error;
2616  size_t size;
2617 
2618  size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
2619  rport = kzalloc(size, GFP_KERNEL);
2620  if (unlikely(!rport)) {
2621  printk(KERN_ERR "%s: allocation failure\n", __func__);
2622  return NULL;
2623  }
2624 
2625  rport->maxframe_size = -1;
2627  rport->dev_loss_tmo = fc_host->dev_loss_tmo;
2628  memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2629  memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2630  rport->port_id = ids->port_id;
2631  rport->roles = ids->roles;
2633  if (fci->f->dd_fcrport_size)
2634  rport->dd_data = &rport[1];
2635  rport->channel = channel;
2636  rport->fast_io_fail_tmo = -1;
2637 
2638  INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
2639  INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
2640  INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
2641  INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
2642  INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
2643 
2644  spin_lock_irqsave(shost->host_lock, flags);
2645 
2646  rport->number = fc_host->next_rport_number++;
2647  if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
2648  rport->scsi_target_id = fc_host->next_target_id++;
2649  else
2650  rport->scsi_target_id = -1;
2651  list_add_tail(&rport->peers, &fc_host->rports);
2652  get_device(&shost->shost_gendev); /* for fc_host->rport list */
2653 
2654  spin_unlock_irqrestore(shost->host_lock, flags);
2655 
2656  dev = &rport->dev;
2657  device_initialize(dev); /* takes self reference */
2658  dev->parent = get_device(&shost->shost_gendev); /* parent reference */
2659  dev->release = fc_rport_dev_release;
2660  dev_set_name(dev, "rport-%d:%d-%d",
2661  shost->host_no, channel, rport->number);
2663 
2664  error = device_add(dev);
2665  if (error) {
2666  printk(KERN_ERR "FC Remote Port device_add failed\n");
2667  goto delete_rport;
2668  }
2669  transport_add_device(dev);
2671 
2672  fc_bsg_rportadd(shost, rport);
2673  /* ignore any bsg add error - we just can't do sgio */
2674 
2675  if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2676  /* initiate a scan of the target */
2677  rport->flags |= FC_RPORT_SCAN_PENDING;
2678  scsi_queue_work(shost, &rport->scan_work);
2679  }
2680 
2681  return rport;
2682 
2683 delete_rport:
2685  spin_lock_irqsave(shost->host_lock, flags);
2686  list_del(&rport->peers);
2687  put_device(&shost->shost_gendev); /* for fc_host->rport list */
2688  spin_unlock_irqrestore(shost->host_lock, flags);
2689  put_device(dev->parent);
2690  kfree(rport);
2691  return NULL;
2692 }
2693 
2732 struct fc_rport *
2733 fc_remote_port_add(struct Scsi_Host *shost, int channel,
2734  struct fc_rport_identifiers *ids)
2735 {
2736  struct fc_internal *fci = to_fc_internal(shost->transportt);
2737  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2738  struct fc_rport *rport;
2739  unsigned long flags;
2740  int match = 0;
2741 
2742  /* ensure any stgt delete functions are done */
2743  fc_flush_work(shost);
2744 
2745  /*
2746  * Search the list of "active" rports, for an rport that has been
2747  * deleted, but we've held off the real delete while the target
2748  * is in a "blocked" state.
2749  */
2750  spin_lock_irqsave(shost->host_lock, flags);
2751 
2752  list_for_each_entry(rport, &fc_host->rports, peers) {
2753 
2754  if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
2755  (rport->channel == channel)) {
2756 
2757  switch (fc_host->tgtid_bind_type) {
2758  case FC_TGTID_BIND_BY_WWPN:
2759  case FC_TGTID_BIND_NONE:
2760  if (rport->port_name == ids->port_name)
2761  match = 1;
2762  break;
2763  case FC_TGTID_BIND_BY_WWNN:
2764  if (rport->node_name == ids->node_name)
2765  match = 1;
2766  break;
2767  case FC_TGTID_BIND_BY_ID:
2768  if (rport->port_id == ids->port_id)
2769  match = 1;
2770  break;
2771  }
2772 
2773  if (match) {
2774 
2775  memcpy(&rport->node_name, &ids->node_name,
2776  sizeof(rport->node_name));
2777  memcpy(&rport->port_name, &ids->port_name,
2778  sizeof(rport->port_name));
2779  rport->port_id = ids->port_id;
2780 
2782  rport->roles = ids->roles;
2783 
2784  spin_unlock_irqrestore(shost->host_lock, flags);
2785 
2786  if (fci->f->dd_fcrport_size)
2787  memset(rport->dd_data, 0,
2788  fci->f->dd_fcrport_size);
2789 
2790  /*
2791  * If we were not a target, cancel the
2792  * io terminate and rport timers, and
2793  * we're done.
2794  *
2795  * If we were a target, but our new role
2796  * doesn't indicate a target, leave the
2797  * timers running expecting the role to
2798  * change as the target fully logs in. If
2799  * it doesn't, the target will be torn down.
2800  *
2801  * If we were a target, and our role shows
2802  * we're still a target, cancel the timers
2803  * and kick off a scan.
2804  */
2805 
2806  /* was a target, not in roles */
2807  if ((rport->scsi_target_id != -1) &&
2808  (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
2809  return rport;
2810 
2811  /*
2812  * Stop the fail io and dev_loss timers.
2813  * If they flush, the port_state will
2814  * be checked and will NOOP the function.
2815  */
2816  if (!cancel_delayed_work(&rport->fail_io_work))
2817  fc_flush_devloss(shost);
2818  if (!cancel_delayed_work(&rport->dev_loss_work))
2819  fc_flush_devloss(shost);
2820 
2821  spin_lock_irqsave(shost->host_lock, flags);
2822 
2823  rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2826 
2827  spin_unlock_irqrestore(shost->host_lock, flags);
2828 
2829  /* if target, initiate a scan */
2830  if (rport->scsi_target_id != -1) {
2831  scsi_target_unblock(&rport->dev,
2832  SDEV_RUNNING);
2834  flags);
2835  rport->flags |= FC_RPORT_SCAN_PENDING;
2836  scsi_queue_work(shost,
2837  &rport->scan_work);
2838  spin_unlock_irqrestore(shost->host_lock,
2839  flags);
2840  }
2841 
2842  fc_bsg_goose_queue(rport);
2843 
2844  return rport;
2845  }
2846  }
2847  }
2848 
2849  /*
2850  * Search the bindings array
2851  * Note: if never a FCP target, you won't be on this list
2852  */
2853  if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
2854 
2855  /* search for a matching consistent binding */
2856 
2857  list_for_each_entry(rport, &fc_host->rport_bindings,
2858  peers) {
2859  if (rport->channel != channel)
2860  continue;
2861 
2862  switch (fc_host->tgtid_bind_type) {
2863  case FC_TGTID_BIND_BY_WWPN:
2864  if (rport->port_name == ids->port_name)
2865  match = 1;
2866  break;
2867  case FC_TGTID_BIND_BY_WWNN:
2868  if (rport->node_name == ids->node_name)
2869  match = 1;
2870  break;
2871  case FC_TGTID_BIND_BY_ID:
2872  if (rport->port_id == ids->port_id)
2873  match = 1;
2874  break;
2875  case FC_TGTID_BIND_NONE: /* to keep compiler happy */
2876  break;
2877  }
2878 
2879  if (match) {
2880  list_move_tail(&rport->peers, &fc_host->rports);
2881  break;
2882  }
2883  }
2884 
2885  if (match) {
2886  memcpy(&rport->node_name, &ids->node_name,
2887  sizeof(rport->node_name));
2888  memcpy(&rport->port_name, &ids->port_name,
2889  sizeof(rport->port_name));
2890  rport->port_id = ids->port_id;
2891  rport->roles = ids->roles;
2894 
2895  if (fci->f->dd_fcrport_size)
2896  memset(rport->dd_data, 0,
2897  fci->f->dd_fcrport_size);
2898  spin_unlock_irqrestore(shost->host_lock, flags);
2899 
2900  if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
2902 
2903  /* initiate a scan of the target */
2904  spin_lock_irqsave(shost->host_lock, flags);
2905  rport->flags |= FC_RPORT_SCAN_PENDING;
2906  scsi_queue_work(shost, &rport->scan_work);
2907  spin_unlock_irqrestore(shost->host_lock, flags);
2908  }
2909  return rport;
2910  }
2911  }
2912 
2913  spin_unlock_irqrestore(shost->host_lock, flags);
2914 
2915  /* No consistent binding found - create new remote port entry */
2916  rport = fc_rport_create(shost, channel, ids);
2917 
2918  return rport;
2919 }
2921 
2922 
2973 void
2975 {
2976  struct Scsi_Host *shost = rport_to_shost(rport);
2977  unsigned long timeout = rport->dev_loss_tmo;
2978  unsigned long flags;
2979 
2980  /*
2981  * No need to flush the fc_host work_q's, as all adds are synchronous.
2982  *
2983  * We do need to reclaim the rport scan work element, so eventually
2984  * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
2985  * there's still a scan pending.
2986  */
2987 
2988  spin_lock_irqsave(shost->host_lock, flags);
2989 
2990  if (rport->port_state != FC_PORTSTATE_ONLINE) {
2991  spin_unlock_irqrestore(shost->host_lock, flags);
2992  return;
2993  }
2994 
2995  /*
2996  * In the past, we if this was not an FCP-Target, we would
2997  * unconditionally just jump to deleting the rport.
2998  * However, rports can be used as node containers by the LLDD,
2999  * and its not appropriate to just terminate the rport at the
3000  * first sign of a loss in connectivity. The LLDD may want to
3001  * send ELS traffic to re-validate the login. If the rport is
3002  * immediately deleted, it makes it inappropriate for a node
3003  * container.
3004  * So... we now unconditionally wait dev_loss_tmo before
3005  * destroying an rport.
3006  */
3007 
3009 
3010  rport->flags |= FC_RPORT_DEVLOSS_PENDING;
3011 
3012  spin_unlock_irqrestore(shost->host_lock, flags);
3013 
3014  if (rport->roles & FC_PORT_ROLE_FCP_INITIATOR &&
3015  shost->active_mode & MODE_TARGET)
3016  fc_tgt_it_nexus_destroy(shost, (unsigned long)rport);
3017 
3018  scsi_target_block(&rport->dev);
3019 
3020  /* see if we need to kill io faster than waiting for device loss */
3021  if ((rport->fast_io_fail_tmo != -1) &&
3022  (rport->fast_io_fail_tmo < timeout))
3023  fc_queue_devloss_work(shost, &rport->fail_io_work,
3024  rport->fast_io_fail_tmo * HZ);
3025 
3026  /* cap the length the devices can be blocked until they are deleted */
3027  fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
3028 }
3030 
3051 void
3053 {
3054  struct Scsi_Host *shost = rport_to_shost(rport);
3055  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3056  unsigned long flags;
3057  int create = 0;
3058  int ret;
3059 
3060  spin_lock_irqsave(shost->host_lock, flags);
3061  if (roles & FC_PORT_ROLE_FCP_TARGET) {
3062  if (rport->scsi_target_id == -1) {
3063  rport->scsi_target_id = fc_host->next_target_id++;
3064  create = 1;
3065  } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
3066  create = 1;
3067  } else if (shost->active_mode & MODE_TARGET) {
3068  ret = fc_tgt_it_nexus_create(shost, (unsigned long)rport,
3069  (char *)&rport->node_name);
3070  if (ret)
3071  printk(KERN_ERR "FC Remore Port tgt nexus failed %d\n",
3072  ret);
3073  }
3074 
3075  rport->roles = roles;
3076 
3077  spin_unlock_irqrestore(shost->host_lock, flags);
3078 
3079  if (create) {
3080  /*
3081  * There may have been a delete timer running on the
3082  * port. Ensure that it is cancelled as we now know
3083  * the port is an FCP Target.
3084  * Note: we know the rport is exists and in an online
3085  * state as the LLDD would not have had an rport
3086  * reference to pass us.
3087  *
3088  * Take no action on the del_timer failure as the state
3089  * machine state change will validate the
3090  * transaction.
3091  */
3092  if (!cancel_delayed_work(&rport->fail_io_work))
3093  fc_flush_devloss(shost);
3094  if (!cancel_delayed_work(&rport->dev_loss_work))
3095  fc_flush_devloss(shost);
3096 
3097  spin_lock_irqsave(shost->host_lock, flags);
3098  rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3101  spin_unlock_irqrestore(shost->host_lock, flags);
3102 
3103  /* ensure any stgt delete functions are done */
3104  fc_flush_work(shost);
3105 
3107  /* initiate a scan of the target */
3108  spin_lock_irqsave(shost->host_lock, flags);
3109  rport->flags |= FC_RPORT_SCAN_PENDING;
3110  scsi_queue_work(shost, &rport->scan_work);
3111  spin_unlock_irqrestore(shost->host_lock, flags);
3112  }
3113 }
3115 
3123 static void
3124 fc_timeout_deleted_rport(struct work_struct *work)
3125 {
3126  struct fc_rport *rport =
3127  container_of(work, struct fc_rport, dev_loss_work.work);
3128  struct Scsi_Host *shost = rport_to_shost(rport);
3129  struct fc_internal *i = to_fc_internal(shost->transportt);
3130  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3131  unsigned long flags;
3132  int do_callback = 0;
3133 
3134  spin_lock_irqsave(shost->host_lock, flags);
3135 
3136  rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3137 
3138  /*
3139  * If the port is ONLINE, then it came back. If it was a SCSI
3140  * target, validate it still is. If not, tear down the
3141  * scsi_target on it.
3142  */
3143  if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3144  (rport->scsi_target_id != -1) &&
3145  !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3146  dev_printk(KERN_ERR, &rport->dev,
3147  "blocked FC remote port time out: no longer"
3148  " a FCP target, removing starget\n");
3149  spin_unlock_irqrestore(shost->host_lock, flags);
3151  fc_queue_work(shost, &rport->stgt_delete_work);
3152  return;
3153  }
3154 
3155  /* NOOP state - we're flushing workq's */
3156  if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3157  spin_unlock_irqrestore(shost->host_lock, flags);
3158  dev_printk(KERN_ERR, &rport->dev,
3159  "blocked FC remote port time out: leaving"
3160  " rport%s alone\n",
3161  (rport->scsi_target_id != -1) ? " and starget" : "");
3162  return;
3163  }
3164 
3165  if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3166  (rport->scsi_target_id == -1)) {
3167  list_del(&rport->peers);
3169  dev_printk(KERN_ERR, &rport->dev,
3170  "blocked FC remote port time out: removing"
3171  " rport%s\n",
3172  (rport->scsi_target_id != -1) ? " and starget" : "");
3173  fc_queue_work(shost, &rport->rport_delete_work);
3174  spin_unlock_irqrestore(shost->host_lock, flags);
3175  return;
3176  }
3177 
3178  dev_printk(KERN_ERR, &rport->dev,
3179  "blocked FC remote port time out: removing target and "
3180  "saving binding\n");
3181 
3182  list_move_tail(&rport->peers, &fc_host->rport_bindings);
3183 
3184  /*
3185  * Note: We do not remove or clear the hostdata area. This allows
3186  * host-specific target data to persist along with the
3187  * scsi_target_id. It's up to the host to manage it's hostdata area.
3188  */
3189 
3190  /*
3191  * Reinitialize port attributes that may change if the port comes back.
3192  */
3193  rport->maxframe_size = -1;
3195  rport->roles = FC_PORT_ROLE_UNKNOWN;
3198 
3199  /*
3200  * Pre-emptively kill I/O rather than waiting for the work queue
3201  * item to teardown the starget. (FCOE libFC folks prefer this
3202  * and to have the rport_port_id still set when it's done).
3203  */
3204  spin_unlock_irqrestore(shost->host_lock, flags);
3205  fc_terminate_rport_io(rport);
3206 
3207  spin_lock_irqsave(shost->host_lock, flags);
3208 
3209  if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
3210 
3211  /* remove the identifiers that aren't used in the consisting binding */
3212  switch (fc_host->tgtid_bind_type) {
3213  case FC_TGTID_BIND_BY_WWPN:
3214  rport->node_name = -1;
3215  rport->port_id = -1;
3216  break;
3217  case FC_TGTID_BIND_BY_WWNN:
3218  rport->port_name = -1;
3219  rport->port_id = -1;
3220  break;
3221  case FC_TGTID_BIND_BY_ID:
3222  rport->node_name = -1;
3223  rport->port_name = -1;
3224  break;
3225  case FC_TGTID_BIND_NONE: /* to keep compiler happy */
3226  break;
3227  }
3228 
3229  /*
3230  * As this only occurs if the remote port (scsi target)
3231  * went away and didn't come back - we'll remove
3232  * all attached scsi devices.
3233  */
3235  fc_queue_work(shost, &rport->stgt_delete_work);
3236 
3237  do_callback = 1;
3238  }
3239 
3240  spin_unlock_irqrestore(shost->host_lock, flags);
3241 
3242  /*
3243  * Notify the driver that the rport is now dead. The LLDD will
3244  * also guarantee that any communication to the rport is terminated
3245  *
3246  * Note: we set the CALLBK_DONE flag above to correspond
3247  */
3248  if (do_callback && i->f->dev_loss_tmo_callbk)
3249  i->f->dev_loss_tmo_callbk(rport);
3250 }
3251 
3252 
3260 static void
3261 fc_timeout_fail_rport_io(struct work_struct *work)
3262 {
3263  struct fc_rport *rport =
3264  container_of(work, struct fc_rport, fail_io_work.work);
3265 
3266  if (rport->port_state != FC_PORTSTATE_BLOCKED)
3267  return;
3268 
3270  fc_terminate_rport_io(rport);
3271 }
3272 
3277 static void
3278 fc_scsi_scan_rport(struct work_struct *work)
3279 {
3280  struct fc_rport *rport =
3281  container_of(work, struct fc_rport, scan_work);
3282  struct Scsi_Host *shost = rport_to_shost(rport);
3283  struct fc_internal *i = to_fc_internal(shost->transportt);
3284  unsigned long flags;
3285 
3286  if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3287  (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3288  !(i->f->disable_target_scan)) {
3289  scsi_scan_target(&rport->dev, rport->channel,
3290  rport->scsi_target_id, SCAN_WILD_CARD, 1);
3291  }
3292 
3293  spin_lock_irqsave(shost->host_lock, flags);
3294  rport->flags &= ~FC_RPORT_SCAN_PENDING;
3295  spin_unlock_irqrestore(shost->host_lock, flags);
3296 }
3297 
3312 int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3313 {
3314  struct Scsi_Host *shost = cmnd->device->host;
3315  struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3316  unsigned long flags;
3317 
3318  spin_lock_irqsave(shost->host_lock, flags);
3319  while (rport->port_state == FC_PORTSTATE_BLOCKED &&
3320  !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
3321  spin_unlock_irqrestore(shost->host_lock, flags);
3322  msleep(1000);
3323  spin_lock_irqsave(shost->host_lock, flags);
3324  }
3325  spin_unlock_irqrestore(shost->host_lock, flags);
3326 
3327  if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
3328  return FAST_IO_FAIL;
3329 
3330  return 0;
3331 }
3333 
3349 static int
3350 fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3351  struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3352 {
3353  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3354  struct fc_internal *fci = to_fc_internal(shost->transportt);
3355  struct fc_vport *vport;
3356  struct device *dev;
3357  unsigned long flags;
3358  size_t size;
3359  int error;
3360 
3361  *ret_vport = NULL;
3362 
3363  if ( ! fci->f->vport_create)
3364  return -ENOENT;
3365 
3366  size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3367  vport = kzalloc(size, GFP_KERNEL);
3368  if (unlikely(!vport)) {
3369  printk(KERN_ERR "%s: allocation failure\n", __func__);
3370  return -ENOMEM;
3371  }
3372 
3373  vport->vport_state = FC_VPORT_UNKNOWN;
3375  vport->node_name = ids->node_name;
3376  vport->port_name = ids->port_name;
3377  vport->roles = ids->roles;
3378  vport->vport_type = ids->vport_type;
3379  if (fci->f->dd_fcvport_size)
3380  vport->dd_data = &vport[1];
3381  vport->shost = shost;
3382  vport->channel = channel;
3383  vport->flags = FC_VPORT_CREATING;
3384  INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3385 
3386  spin_lock_irqsave(shost->host_lock, flags);
3387 
3388  if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3389  spin_unlock_irqrestore(shost->host_lock, flags);
3390  kfree(vport);
3391  return -ENOSPC;
3392  }
3393  fc_host->npiv_vports_inuse++;
3394  vport->number = fc_host->next_vport_number++;
3395  list_add_tail(&vport->peers, &fc_host->vports);
3396  get_device(&shost->shost_gendev); /* for fc_host->vport list */
3397 
3398  spin_unlock_irqrestore(shost->host_lock, flags);
3399 
3400  dev = &vport->dev;
3401  device_initialize(dev); /* takes self reference */
3402  dev->parent = get_device(pdev); /* takes parent reference */
3403  dev->release = fc_vport_dev_release;
3404  dev_set_name(dev, "vport-%d:%d-%d",
3405  shost->host_no, channel, vport->number);
3407 
3408  error = device_add(dev);
3409  if (error) {
3410  printk(KERN_ERR "FC Virtual Port device_add failed\n");
3411  goto delete_vport;
3412  }
3413  transport_add_device(dev);
3415 
3416  error = fci->f->vport_create(vport, ids->disable);
3417  if (error) {
3418  printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3419  goto delete_vport_all;
3420  }
3421 
3422  /*
3423  * if the parent isn't the physical adapter's Scsi_Host, ensure
3424  * the Scsi_Host at least contains ia symlink to the vport.
3425  */
3426  if (pdev != &shost->shost_gendev) {
3427  error = sysfs_create_link(&shost->shost_gendev.kobj,
3428  &dev->kobj, dev_name(dev));
3429  if (error)
3431  "%s: Cannot create vport symlinks for "
3432  "%s, err=%d\n",
3433  __func__, dev_name(dev), error);
3434  }
3435  spin_lock_irqsave(shost->host_lock, flags);
3436  vport->flags &= ~FC_VPORT_CREATING;
3437  spin_unlock_irqrestore(shost->host_lock, flags);
3438 
3439  dev_printk(KERN_NOTICE, pdev,
3440  "%s created via shost%d channel %d\n", dev_name(dev),
3441  shost->host_no, channel);
3442 
3443  *ret_vport = vport;
3444 
3445  return 0;
3446 
3447 delete_vport_all:
3449  device_del(dev);
3450 delete_vport:
3452  spin_lock_irqsave(shost->host_lock, flags);
3453  list_del(&vport->peers);
3454  put_device(&shost->shost_gendev); /* for fc_host->vport list */
3455  fc_host->npiv_vports_inuse--;
3456  spin_unlock_irqrestore(shost->host_lock, flags);
3457  put_device(dev->parent);
3458  kfree(vport);
3459 
3460  return error;
3461 }
3462 
3473 struct fc_vport *
3474 fc_vport_create(struct Scsi_Host *shost, int channel,
3475  struct fc_vport_identifiers *ids)
3476 {
3477  int stat;
3478  struct fc_vport *vport;
3479 
3480  stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3481  ids, &vport);
3482  return stat ? NULL : vport;
3483 }
3485 
3496 int
3498 {
3499  struct Scsi_Host *shost = vport_to_shost(vport);
3500  struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3501  struct fc_internal *i = to_fc_internal(shost->transportt);
3502  struct device *dev = &vport->dev;
3503  unsigned long flags;
3504  int stat;
3505 
3506  if (i->f->vport_delete)
3507  stat = i->f->vport_delete(vport);
3508  else
3509  stat = -ENOENT;
3510 
3511  spin_lock_irqsave(shost->host_lock, flags);
3512  vport->flags &= ~FC_VPORT_DELETING;
3513  if (!stat) {
3514  vport->flags |= FC_VPORT_DELETED;
3515  list_del(&vport->peers);
3516  fc_host->npiv_vports_inuse--;
3517  put_device(&shost->shost_gendev); /* for fc_host->vport list */
3518  }
3519  spin_unlock_irqrestore(shost->host_lock, flags);
3520 
3521  if (stat)
3522  return stat;
3523 
3524  if (dev->parent != &shost->shost_gendev)
3525  sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
3527  device_del(dev);
3529 
3530  /*
3531  * Removing our self-reference should mean our
3532  * release function gets called, which will drop the remaining
3533  * parent reference and free the data structure.
3534  */
3535  put_device(dev); /* for self-reference */
3536 
3537  return 0; /* SUCCESS */
3538 }
3540 
3545 static void
3546 fc_vport_sched_delete(struct work_struct *work)
3547 {
3548  struct fc_vport *vport =
3549  container_of(work, struct fc_vport, vport_delete_work);
3550  int stat;
3551 
3552  stat = fc_vport_terminate(vport);
3553  if (stat)
3554  dev_printk(KERN_ERR, vport->dev.parent,
3555  "%s: %s could not be deleted created via "
3556  "shost%d channel %d - error %d\n", __func__,
3557  dev_name(&vport->dev), vport->shost->host_no,
3558  vport->channel, stat);
3559 }
3560 
3561 
3562 /*
3563  * BSG support
3564  */
3565 
3566 
3571 static void
3572 fc_destroy_bsgjob(struct fc_bsg_job *job)
3573 {
3574  unsigned long flags;
3575 
3576  spin_lock_irqsave(&job->job_lock, flags);
3577  if (job->ref_cnt) {
3578  spin_unlock_irqrestore(&job->job_lock, flags);
3579  return;
3580  }
3581  spin_unlock_irqrestore(&job->job_lock, flags);
3582 
3583  put_device(job->dev); /* release reference for the request */
3584 
3585  kfree(job->request_payload.sg_list);
3586  kfree(job->reply_payload.sg_list);
3587  kfree(job);
3588 }
3589 
3595 static void
3596 fc_bsg_jobdone(struct fc_bsg_job *job)
3597 {
3598  struct request *req = job->req;
3599  struct request *rsp = req->next_rq;
3600  int err;
3601 
3602  err = job->req->errors = job->reply->result;
3603 
3604  if (err < 0)
3605  /* we're only returning the result field in the reply */
3606  job->req->sense_len = sizeof(uint32_t);
3607  else
3608  job->req->sense_len = job->reply_len;
3609 
3610  /* we assume all request payload was transferred, residual == 0 */
3611  req->resid_len = 0;
3612 
3613  if (rsp) {
3614  WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3615 
3616  /* set reply (bidi) residual */
3617  rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3618  rsp->resid_len);
3619  }
3620  blk_complete_request(req);
3621 }
3622 
3627 static void fc_bsg_softirq_done(struct request *rq)
3628 {
3629  struct fc_bsg_job *job = rq->special;
3630  unsigned long flags;
3631 
3632  spin_lock_irqsave(&job->job_lock, flags);
3634  job->ref_cnt--;
3635  spin_unlock_irqrestore(&job->job_lock, flags);
3636 
3637  blk_end_request_all(rq, rq->errors);
3638  fc_destroy_bsgjob(job);
3639 }
3640 
3645 static enum blk_eh_timer_return
3646 fc_bsg_job_timeout(struct request *req)
3647 {
3648  struct fc_bsg_job *job = (void *) req->special;
3649  struct Scsi_Host *shost = job->shost;
3650  struct fc_internal *i = to_fc_internal(shost->transportt);
3651  unsigned long flags;
3652  int err = 0, done = 0;
3653 
3654  if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
3655  return BLK_EH_RESET_TIMER;
3656 
3657  spin_lock_irqsave(&job->job_lock, flags);
3658  if (job->state_flags & FC_RQST_STATE_DONE)
3659  done = 1;
3660  else
3661  job->ref_cnt++;
3662  spin_unlock_irqrestore(&job->job_lock, flags);
3663 
3664  if (!done && i->f->bsg_timeout) {
3665  /* call LLDD to abort the i/o as it has timed out */
3666  err = i->f->bsg_timeout(job);
3667  if (err == -EAGAIN) {
3668  job->ref_cnt--;
3669  return BLK_EH_RESET_TIMER;
3670  } else if (err)
3671  printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3672  "abort failed with status %d\n", err);
3673  }
3674 
3675  /* the blk_end_sync_io() doesn't check the error */
3676  if (done)
3677  return BLK_EH_NOT_HANDLED;
3678  else
3679  return BLK_EH_HANDLED;
3680 }
3681 
3682 static int
3683 fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3684 {
3685  size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3686 
3687  BUG_ON(!req->nr_phys_segments);
3688 
3689  buf->sg_list = kzalloc(sz, GFP_KERNEL);
3690  if (!buf->sg_list)
3691  return -ENOMEM;
3692  sg_init_table(buf->sg_list, req->nr_phys_segments);
3693  buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3694  buf->payload_len = blk_rq_bytes(req);
3695  return 0;
3696 }
3697 
3698 
3706 static int
3707 fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3708  struct request *req)
3709 {
3710  struct fc_internal *i = to_fc_internal(shost->transportt);
3711  struct request *rsp = req->next_rq;
3712  struct fc_bsg_job *job;
3713  int ret;
3714 
3715  BUG_ON(req->special);
3716 
3717  job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3718  GFP_KERNEL);
3719  if (!job)
3720  return -ENOMEM;
3721 
3722  /*
3723  * Note: this is a bit silly.
3724  * The request gets formatted as a SGIO v4 ioctl request, which
3725  * then gets reformatted as a blk request, which then gets
3726  * reformatted as a fc bsg request. And on completion, we have
3727  * to wrap return results such that SGIO v4 thinks it was a scsi
3728  * status. I hope this was all worth it.
3729  */
3730 
3731  req->special = job;
3732  job->shost = shost;
3733  job->rport = rport;
3734  job->req = req;
3735  if (i->f->dd_bsg_size)
3736  job->dd_data = (void *)&job[1];
3737  spin_lock_init(&job->job_lock);
3738  job->request = (struct fc_bsg_request *)req->cmd;
3739  job->request_len = req->cmd_len;
3740  job->reply = req->sense;
3741  job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
3742  * allocated */
3743  if (req->bio) {
3744  ret = fc_bsg_map_buffer(&job->request_payload, req);
3745  if (ret)
3746  goto failjob_rls_job;
3747  }
3748  if (rsp && rsp->bio) {
3749  ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3750  if (ret)
3751  goto failjob_rls_rqst_payload;
3752  }
3753  job->job_done = fc_bsg_jobdone;
3754  if (rport)
3755  job->dev = &rport->dev;
3756  else
3757  job->dev = &shost->shost_gendev;
3758  get_device(job->dev); /* take a reference for the request */
3759 
3760  job->ref_cnt = 1;
3761 
3762  return 0;
3763 
3764 
3765 failjob_rls_rqst_payload:
3766  kfree(job->request_payload.sg_list);
3767 failjob_rls_job:
3768  kfree(job);
3769  return -ENOMEM;
3770 }
3771 
3772 
3774  FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
3775  FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
3776  FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
3777 };
3778 
3779 
3786 static enum fc_dispatch_result
3787 fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3788  struct fc_bsg_job *job)
3789 {
3790  struct fc_internal *i = to_fc_internal(shost->transportt);
3791  int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3792  int ret;
3793 
3794  /* Validate the host command */
3795  switch (job->request->msgcode) {
3796  case FC_BSG_HST_ADD_RPORT:
3797  cmdlen += sizeof(struct fc_bsg_host_add_rport);
3798  break;
3799 
3800  case FC_BSG_HST_DEL_RPORT:
3801  cmdlen += sizeof(struct fc_bsg_host_del_rport);
3802  break;
3803 
3805  cmdlen += sizeof(struct fc_bsg_host_els);
3806  /* there better be a xmt and rcv payloads */
3807  if ((!job->request_payload.payload_len) ||
3808  (!job->reply_payload.payload_len)) {
3809  ret = -EINVAL;
3810  goto fail_host_msg;
3811  }
3812  break;
3813 
3814  case FC_BSG_HST_CT:
3815  cmdlen += sizeof(struct fc_bsg_host_ct);
3816  /* there better be xmt and rcv payloads */
3817  if ((!job->request_payload.payload_len) ||
3818  (!job->reply_payload.payload_len)) {
3819  ret = -EINVAL;
3820  goto fail_host_msg;
3821  }
3822  break;
3823 
3824  case FC_BSG_HST_VENDOR:
3825  cmdlen += sizeof(struct fc_bsg_host_vendor);
3826  if ((shost->hostt->vendor_id == 0L) ||
3827  (job->request->rqst_data.h_vendor.vendor_id !=
3828  shost->hostt->vendor_id)) {
3829  ret = -ESRCH;
3830  goto fail_host_msg;
3831  }
3832  break;
3833 
3834  default:
3835  ret = -EBADR;
3836  goto fail_host_msg;
3837  }
3838 
3839  /* check if we really have all the request data needed */
3840  if (job->request_len < cmdlen) {
3841  ret = -ENOMSG;
3842  goto fail_host_msg;
3843  }
3844 
3845  ret = i->f->bsg_request(job);
3846  if (!ret)
3847  return FC_DISPATCH_UNLOCKED;
3848 
3849 fail_host_msg:
3850  /* return the errno failure code as the only status */
3851  BUG_ON(job->reply_len < sizeof(uint32_t));
3852  job->reply->reply_payload_rcv_len = 0;
3853  job->reply->result = ret;
3854  job->reply_len = sizeof(uint32_t);
3855  fc_bsg_jobdone(job);
3856  return FC_DISPATCH_UNLOCKED;
3857 }
3858 
3859 
3860 /*
3861  * fc_bsg_goose_queue - restart rport queue in case it was stopped
3862  * @rport: rport to be restarted
3863  */
3864 static void
3865 fc_bsg_goose_queue(struct fc_rport *rport)
3866 {
3867  if (!rport->rqst_q)
3868  return;
3869 
3870  /*
3871  * This get/put dance makes no sense
3872  */
3873  get_device(&rport->dev);
3874  blk_run_queue_async(rport->rqst_q);
3875  put_device(&rport->dev);
3876 }
3877 
3885 static enum fc_dispatch_result
3886 fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3887  struct fc_rport *rport, struct fc_bsg_job *job)
3888 {
3889  struct fc_internal *i = to_fc_internal(shost->transportt);
3890  int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3891  int ret;
3892 
3893  /* Validate the rport command */
3894  switch (job->request->msgcode) {
3895  case FC_BSG_RPT_ELS:
3896  cmdlen += sizeof(struct fc_bsg_rport_els);
3897  goto check_bidi;
3898 
3899  case FC_BSG_RPT_CT:
3900  cmdlen += sizeof(struct fc_bsg_rport_ct);
3901 check_bidi:
3902  /* there better be xmt and rcv payloads */
3903  if ((!job->request_payload.payload_len) ||
3904  (!job->reply_payload.payload_len)) {
3905  ret = -EINVAL;
3906  goto fail_rport_msg;
3907  }
3908  break;
3909  default:
3910  ret = -EBADR;
3911  goto fail_rport_msg;
3912  }
3913 
3914  /* check if we really have all the request data needed */
3915  if (job->request_len < cmdlen) {
3916  ret = -ENOMSG;
3917  goto fail_rport_msg;
3918  }
3919 
3920  ret = i->f->bsg_request(job);
3921  if (!ret)
3922  return FC_DISPATCH_UNLOCKED;
3923 
3924 fail_rport_msg:
3925  /* return the errno failure code as the only status */
3926  BUG_ON(job->reply_len < sizeof(uint32_t));
3927  job->reply->reply_payload_rcv_len = 0;
3928  job->reply->result = ret;
3929  job->reply_len = sizeof(uint32_t);
3930  fc_bsg_jobdone(job);
3931  return FC_DISPATCH_UNLOCKED;
3932 }
3933 
3934 
3942 static void
3943 fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3944  struct fc_rport *rport, struct device *dev)
3945 {
3946  struct request *req;
3947  struct fc_bsg_job *job;
3948  enum fc_dispatch_result ret;
3949 
3950  if (!get_device(dev))
3951  return;
3952 
3953  while (1) {
3954  if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3955  !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3956  break;
3957 
3958  req = blk_fetch_request(q);
3959  if (!req)
3960  break;
3961 
3962  if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3963  req->errors = -ENXIO;
3964  spin_unlock_irq(q->queue_lock);
3965  blk_end_request_all(req, -ENXIO);
3966  spin_lock_irq(q->queue_lock);
3967  continue;
3968  }
3969 
3970  spin_unlock_irq(q->queue_lock);
3971 
3972  ret = fc_req_to_bsgjob(shost, rport, req);
3973  if (ret) {
3974  req->errors = ret;
3975  blk_end_request_all(req, ret);
3976  spin_lock_irq(q->queue_lock);
3977  continue;
3978  }
3979 
3980  job = req->special;
3981 
3982  /* check if we have the msgcode value at least */
3983  if (job->request_len < sizeof(uint32_t)) {
3984  BUG_ON(job->reply_len < sizeof(uint32_t));
3985  job->reply->reply_payload_rcv_len = 0;
3986  job->reply->result = -ENOMSG;
3987  job->reply_len = sizeof(uint32_t);
3988  fc_bsg_jobdone(job);
3989  spin_lock_irq(q->queue_lock);
3990  continue;
3991  }
3992 
3993  /* the dispatch routines will unlock the queue_lock */
3994  if (rport)
3995  ret = fc_bsg_rport_dispatch(q, shost, rport, job);
3996  else
3997  ret = fc_bsg_host_dispatch(q, shost, job);
3998 
3999  /* did dispatcher hit state that can't process any more */
4000  if (ret == FC_DISPATCH_BREAK)
4001  break;
4002 
4003  /* did dispatcher had released the lock */
4004  if (ret == FC_DISPATCH_UNLOCKED)
4005  spin_lock_irq(q->queue_lock);
4006  }
4007 
4008  spin_unlock_irq(q->queue_lock);
4009  put_device(dev);
4010  spin_lock_irq(q->queue_lock);
4011 }
4012 
4013 
4018 static void
4019 fc_bsg_host_handler(struct request_queue *q)
4020 {
4021  struct Scsi_Host *shost = q->queuedata;
4022 
4023  fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
4024 }
4025 
4026 
4031 static void
4032 fc_bsg_rport_handler(struct request_queue *q)
4033 {
4034  struct fc_rport *rport = q->queuedata;
4035  struct Scsi_Host *shost = rport_to_shost(rport);
4036 
4037  fc_bsg_request_handler(q, shost, rport, &rport->dev);
4038 }
4039 
4040 
4046 static int
4047 fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
4048 {
4049  struct device *dev = &shost->shost_gendev;
4050  struct fc_internal *i = to_fc_internal(shost->transportt);
4051  struct request_queue *q;
4052  int err;
4053  char bsg_name[20];
4054 
4055  fc_host->rqst_q = NULL;
4056 
4057  if (!i->f->bsg_request)
4058  return -ENOTSUPP;
4059 
4060  snprintf(bsg_name, sizeof(bsg_name),
4061  "fc_host%d", shost->host_no);
4062 
4063  q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
4064  if (!q) {
4065  printk(KERN_ERR "fc_host%d: bsg interface failed to "
4066  "initialize - no request queue\n",
4067  shost->host_no);
4068  return -ENOMEM;
4069  }
4070 
4071  q->queuedata = shost;
4072  queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
4073  blk_queue_softirq_done(q, fc_bsg_softirq_done);
4074  blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
4076 
4077  err = bsg_register_queue(q, dev, bsg_name, NULL);
4078  if (err) {
4079  printk(KERN_ERR "fc_host%d: bsg interface failed to "
4080  "initialize - register queue\n",
4081  shost->host_no);
4082  blk_cleanup_queue(q);
4083  return err;
4084  }
4085 
4086  fc_host->rqst_q = q;
4087  return 0;
4088 }
4089 
4090 
4096 static int
4097 fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4098 {
4099  struct device *dev = &rport->dev;
4100  struct fc_internal *i = to_fc_internal(shost->transportt);
4101  struct request_queue *q;
4102  int err;
4103 
4104  rport->rqst_q = NULL;
4105 
4106  if (!i->f->bsg_request)
4107  return -ENOTSUPP;
4108 
4109  q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
4110  if (!q) {
4111  printk(KERN_ERR "%s: bsg interface failed to "
4112  "initialize - no request queue\n",
4113  dev->kobj.name);
4114  return -ENOMEM;
4115  }
4116 
4117  q->queuedata = rport;
4118  queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
4119  blk_queue_softirq_done(q, fc_bsg_softirq_done);
4120  blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
4121  blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
4122 
4123  err = bsg_register_queue(q, dev, NULL, NULL);
4124  if (err) {
4125  printk(KERN_ERR "%s: bsg interface failed to "
4126  "initialize - register queue\n",
4127  dev->kobj.name);
4128  blk_cleanup_queue(q);
4129  return err;
4130  }
4131 
4132  rport->rqst_q = q;
4133  return 0;
4134 }
4135 
4136 
4146 static void
4147 fc_bsg_remove(struct request_queue *q)
4148 {
4149  if (q) {
4151  blk_cleanup_queue(q);
4152  }
4153 }
4154 
4155 
4156 /* Original Author: Martin Hicks */
4157 MODULE_AUTHOR("James Smart");
4158 MODULE_DESCRIPTION("FC Transport Attributes");
4159 MODULE_LICENSE("GPL");
4160 
4161 module_init(fc_transport_init);
4162 module_exit(fc_transport_exit);