Linux Kernel  3.7.1
fcoe.c
1 /*
2  * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_ether.h>
26 #include <linux/if_vlan.h>
27 #include <linux/crc32.h>
28 #include <linux/slab.h>
29 #include <linux/cpu.h>
30 #include <linux/fs.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <linux/workqueue.h>
34 #include <net/dcbnl.h>
35 #include <net/dcbevent.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsicam.h>
38 #include <scsi/scsi_transport.h>
39 #include <scsi/scsi_transport_fc.h>
40 #include <net/rtnetlink.h>
41 
42 #include <scsi/fc/fc_encaps.h>
43 #include <scsi/fc/fc_fip.h>
44 #include <scsi/fc/fc_fcoe.h>
45 
46 #include <scsi/libfc.h>
47 #include <scsi/fc_frame.h>
48 #include <scsi/libfcoe.h>
49 
50 #include "fcoe.h"
51 
52 MODULE_AUTHOR("Open-FCoE.org");
53 MODULE_DESCRIPTION("FCoE");
54 MODULE_LICENSE("GPL v2");
55 
56 /* Performance tuning parameters for fcoe */
57 static unsigned int fcoe_ddp_min = 4096;
58 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
59 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
60  "Direct Data Placement (DDP).");
61 
62 unsigned int fcoe_debug_logging;
63 module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
64 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
65 
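The debug_logging value is a bit mask consumed by the FCOE_*_DBG macros defined in fcoe.h. As a minimal sketch of how such a mask-gated macro is typically built (the EXAMPLE_* names and bit assignments below are illustrative assumptions, not the actual fcoe.h definitions):

/* Illustrative sketch only; names and bit values are assumptions. */
#define EXAMPLE_LOGGING         0x01    /* general driver messages */
#define EXAMPLE_NETDEV_LOGGING  0x02    /* netdev-related messages */

#define EXAMPLE_CHECK_LOGGING(level, cmd)                       \
do {                                                            \
        if (unlikely(fcoe_debug_logging & (level)))             \
                cmd;                                            \
} while (0)

/* Only prints when the corresponding bit is set in debug_logging. */
#define EXAMPLE_DBG(fmt, args...)                               \
        EXAMPLE_CHECK_LOGGING(EXAMPLE_LOGGING,                  \
                printk(KERN_INFO "fcoe: " fmt, ##args))

Loading the module with debug_logging=0x3 would then enable both classes of messages.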
66 static DEFINE_MUTEX(fcoe_config_mutex);
67 
68 static struct workqueue_struct *fcoe_wq;
69 
70 /* fcoe_percpu_clean completion. Waiter protected by fcoe_config_mutex */
71 static DECLARE_COMPLETION(fcoe_flush_completion);
72 
73 /* fcoe host list */
74 /* must only be accessed under the RTNL mutex */
75 static LIST_HEAD(fcoe_hostlist);
76 static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
77 
78 /* Function Prototypes */
79 static int fcoe_reset(struct Scsi_Host *);
80 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
81 static int fcoe_rcv(struct sk_buff *, struct net_device *,
82  struct packet_type *, struct net_device *);
83 static int fcoe_percpu_receive_thread(void *);
84 static void fcoe_percpu_clean(struct fc_lport *);
85 static int fcoe_link_speed_update(struct fc_lport *);
86 static int fcoe_link_ok(struct fc_lport *);
87 
88 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
89 static int fcoe_hostlist_add(const struct fc_lport *);
90 
91 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
92 static void fcoe_dev_setup(void);
93 static void fcoe_dev_cleanup(void);
94 static struct fcoe_interface
95 *fcoe_hostlist_lookup_port(const struct net_device *);
96 
97 static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
98  struct packet_type *, struct net_device *);
99 
100 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
101 static void fcoe_update_src_mac(struct fc_lport *, u8 *);
102 static u8 *fcoe_get_src_mac(struct fc_lport *);
103 static void fcoe_destroy_work(struct work_struct *);
104 
105 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
106  unsigned int);
107 static int fcoe_ddp_done(struct fc_lport *, u16);
108 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
109  unsigned int);
110 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
111 static int fcoe_dcb_app_notification(struct notifier_block *notifier,
112  ulong event, void *ptr);
113 
114 static bool fcoe_match(struct net_device *netdev);
115 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
116 static int fcoe_destroy(struct net_device *netdev);
117 static int fcoe_enable(struct net_device *netdev);
118 static int fcoe_disable(struct net_device *netdev);
119 
120 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
121  u32 did, struct fc_frame *,
122  unsigned int op,
123  void (*resp)(struct fc_seq *,
124  struct fc_frame *,
125  void *),
126  void *, u32 timeout);
127 static void fcoe_recv_frame(struct sk_buff *skb);
128 
129 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
130 
131 /* notification function for packets from net device */
132 static struct notifier_block fcoe_notifier = {
133  .notifier_call = fcoe_device_notification,
134 };
135 
136 /* notification function for CPU hotplug events */
137 static struct notifier_block fcoe_cpu_notifier = {
138  .notifier_call = fcoe_cpu_callback,
139 };
140 
141 /* notification function for DCB events */
142 static struct notifier_block dcb_notifier = {
143  .notifier_call = fcoe_dcb_app_notification,
144 };
145 
146 static struct scsi_transport_template *fcoe_nport_scsi_transport;
147 static struct scsi_transport_template *fcoe_vport_scsi_transport;
148 
149 static int fcoe_vport_destroy(struct fc_vport *);
150 static int fcoe_vport_create(struct fc_vport *, bool disabled);
151 static int fcoe_vport_disable(struct fc_vport *, bool disable);
152 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
153 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
154 static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
155 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
156 
157 static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
158  .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
159  .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
160  .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
161  .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
162  .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
163  .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
164  .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
165 
166  .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
167  .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
168 };
169 
170 static struct libfc_function_template fcoe_libfc_fcn_templ = {
171  .frame_send = fcoe_xmit,
172  .ddp_setup = fcoe_ddp_setup,
173  .ddp_done = fcoe_ddp_done,
174  .ddp_target = fcoe_ddp_target,
175  .elsct_send = fcoe_elsct_send,
176  .get_lesb = fcoe_get_lesb,
177  .lport_set_port_id = fcoe_set_port_id,
178 };
179 
180 static struct fc_function_template fcoe_nport_fc_functions = {
181  .show_host_node_name = 1,
182  .show_host_port_name = 1,
183  .show_host_supported_classes = 1,
184  .show_host_supported_fc4s = 1,
185  .show_host_active_fc4s = 1,
186  .show_host_maxframe_size = 1,
187  .show_host_serial_number = 1,
188  .show_host_manufacturer = 1,
189  .show_host_model = 1,
190  .show_host_model_description = 1,
191  .show_host_hardware_version = 1,
192  .show_host_driver_version = 1,
193  .show_host_firmware_version = 1,
194  .show_host_optionrom_version = 1,
195 
196  .show_host_port_id = 1,
197  .show_host_supported_speeds = 1,
198  .get_host_speed = fc_get_host_speed,
199  .show_host_speed = 1,
200  .show_host_port_type = 1,
201  .get_host_port_state = fc_get_host_port_state,
202  .show_host_port_state = 1,
203  .show_host_symbolic_name = 1,
204 
205  .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
206  .show_rport_maxframe_size = 1,
207  .show_rport_supported_classes = 1,
208 
209  .show_host_fabric_name = 1,
210  .show_starget_node_name = 1,
211  .show_starget_port_name = 1,
212  .show_starget_port_id = 1,
213  .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
214  .show_rport_dev_loss_tmo = 1,
215  .get_fc_host_stats = fc_get_host_stats,
216  .issue_fc_host_lip = fcoe_reset,
217 
218  .terminate_rport_io = fc_rport_terminate_io,
219 
220  .vport_create = fcoe_vport_create,
221  .vport_delete = fcoe_vport_destroy,
222  .vport_disable = fcoe_vport_disable,
223  .set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
224 
225  .bsg_request = fc_lport_bsg_request,
226 };
227 
228 static struct fc_function_template fcoe_vport_fc_functions = {
229  .show_host_node_name = 1,
230  .show_host_port_name = 1,
231  .show_host_supported_classes = 1,
232  .show_host_supported_fc4s = 1,
233  .show_host_active_fc4s = 1,
234  .show_host_maxframe_size = 1,
235  .show_host_serial_number = 1,
236  .show_host_manufacturer = 1,
237  .show_host_model = 1,
238  .show_host_model_description = 1,
239  .show_host_hardware_version = 1,
240  .show_host_driver_version = 1,
241  .show_host_firmware_version = 1,
242  .show_host_optionrom_version = 1,
243 
244  .show_host_port_id = 1,
245  .show_host_supported_speeds = 1,
246  .get_host_speed = fc_get_host_speed,
247  .show_host_speed = 1,
248  .show_host_port_type = 1,
249  .get_host_port_state = fc_get_host_port_state,
250  .show_host_port_state = 1,
251  .show_host_symbolic_name = 1,
252 
253  .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
254  .show_rport_maxframe_size = 1,
255  .show_rport_supported_classes = 1,
256 
257  .show_host_fabric_name = 1,
258  .show_starget_node_name = 1,
259  .show_starget_port_name = 1,
260  .show_starget_port_id = 1,
261  .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
262  .show_rport_dev_loss_tmo = 1,
263  .get_fc_host_stats = fc_get_host_stats,
264  .issue_fc_host_lip = fcoe_reset,
265 
266  .terminate_rport_io = fc_rport_terminate_io,
267 
268  .bsg_request = fc_lport_bsg_request,
269 };
270 
271 static struct scsi_host_template fcoe_shost_template = {
272  .module = THIS_MODULE,
273  .name = "FCoE Driver",
274  .proc_name = FCOE_NAME,
275  .queuecommand = fc_queuecommand,
276  .eh_abort_handler = fc_eh_abort,
277  .eh_device_reset_handler = fc_eh_device_reset,
278  .eh_host_reset_handler = fc_eh_host_reset,
279  .slave_alloc = fc_slave_alloc,
280  .change_queue_depth = fc_change_queue_depth,
281  .change_queue_type = fc_change_queue_type,
282  .this_id = -1,
283  .cmd_per_lun = 3,
284  .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
285  .use_clustering = ENABLE_CLUSTERING,
286  .sg_tablesize = SG_ALL,
287  .max_sectors = 0xffff,
288 };
289 
298 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
299  struct net_device *netdev)
300 {
301  struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
302  struct netdev_hw_addr *ha;
303  struct net_device *real_dev;
304  u8 flogi_maddr[ETH_ALEN];
305  const struct net_device_ops *ops;
306 
307  fcoe->netdev = netdev;
308 
309  /* Let LLD initialize for FCoE */
310  ops = netdev->netdev_ops;
311  if (ops->ndo_fcoe_enable) {
312  if (ops->ndo_fcoe_enable(netdev))
313  FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
314  " specific feature for LLD.\n");
315  }
316 
317  /* Bonded devices are not supported */
318  if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
319  FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
320  return -EOPNOTSUPP;
321  }
322 
323  /* look for SAN MAC address, if multiple SAN MACs exist, only
324  * use the first one for SPMA */
325  real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
326  vlan_dev_real_dev(netdev) : netdev;
327  fcoe->realdev = real_dev;
328  rcu_read_lock();
329  for_each_dev_addr(real_dev, ha) {
330  if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
331  (is_valid_ether_addr(ha->addr))) {
332  memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
333  fip->spma = 1;
334  break;
335  }
336  }
337  rcu_read_unlock();
338 
339  /* setup Source Mac Address */
340  if (!fip->spma)
341  memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
342 
343  /*
344  * Add FCoE MAC address as second unicast MAC address
345  * or enter promiscuous mode if not capable of listening
346  * for multiple unicast MACs.
347  */
348  memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
349  dev_uc_add(netdev, flogi_maddr);
350  if (fip->spma)
351  dev_uc_add(netdev, fip->ctl_src_addr);
352  if (fip->mode == FIP_MODE_VN2VN) {
353  dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
354  dev_mc_add(netdev, FIP_ALL_P2P_MACS);
355  } else
356  dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
357 
358  /*
359  * setup the receive function from ethernet driver
360  * on the ethertype for the given device
361  */
362  fcoe->fcoe_packet_type.func = fcoe_rcv;
363  fcoe->fcoe_packet_type.type = htons(ETH_P_FCOE);
364  fcoe->fcoe_packet_type.dev = netdev;
365  dev_add_pack(&fcoe->fcoe_packet_type);
366 
367  fcoe->fip_packet_type.func = fcoe_fip_recv;
368  fcoe->fip_packet_type.type = htons(ETH_P_FIP);
369  fcoe->fip_packet_type.dev = netdev;
370  dev_add_pack(&fcoe->fip_packet_type);
371 
372  return 0;
373 }
374 
382 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
383  enum fip_state fip_mode)
384 {
385  struct fcoe_ctlr_device *ctlr_dev;
386  struct fcoe_ctlr *ctlr;
387  struct fcoe_interface *fcoe;
388  int size;
389  int err;
390 
391  if (!try_module_get(THIS_MODULE)) {
392  FCOE_NETDEV_DBG(netdev,
393  "Could not get a reference to the module\n");
394  fcoe = ERR_PTR(-EBUSY);
395  goto out;
396  }
397 
398  size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
399  ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
400  size);
401  if (!ctlr_dev) {
402  FCOE_DBG("Failed to add fcoe_ctlr_device\n");
403  fcoe = ERR_PTR(-ENOMEM);
404  goto out_putmod;
405  }
406 
407  ctlr = fcoe_ctlr_device_priv(ctlr_dev);
408  fcoe = fcoe_ctlr_priv(ctlr);
409 
410  dev_hold(netdev);
411 
412  /*
413  * Initialize FIP.
414  */
415  fcoe_ctlr_init(ctlr, fip_mode);
416  ctlr->send = fcoe_fip_send;
417  ctlr->update_mac = fcoe_update_src_mac;
418  ctlr->get_src_addr = fcoe_get_src_mac;
419 
420  err = fcoe_interface_setup(fcoe, netdev);
421  if (err) {
422  fcoe_ctlr_destroy(ctlr);
423  fcoe_ctlr_device_delete(ctlr_dev);
424  dev_put(netdev);
425  fcoe = ERR_PTR(err);
426  goto out_putmod;
427  }
428 
429  goto out;
430 
431 out_putmod:
432  module_put(THIS_MODULE);
433 out:
434  return fcoe;
435 }
436 
443 static void fcoe_interface_remove(struct fcoe_interface *fcoe)
444 {
445  struct net_device *netdev = fcoe->netdev;
446  struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
447  u8 flogi_maddr[ETH_ALEN];
448  const struct net_device_ops *ops;
449 
450  /*
451  * Don't listen for Ethernet packets anymore.
452  * synchronize_net() ensures that the packet handlers are not running
453  * on another CPU. dev_remove_pack() would do that, this calls the
454  * unsynchronized version __dev_remove_pack() to avoid multiple delays.
455  */
456  __dev_remove_pack(&fcoe->fcoe_packet_type);
457  __dev_remove_pack(&fcoe->fip_packet_type);
458  synchronize_net();
459 
460  /* Delete secondary MAC addresses */
461  memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
462  dev_uc_del(netdev, flogi_maddr);
463  if (fip->spma)
464  dev_uc_del(netdev, fip->ctl_src_addr);
465  if (fip->mode == FIP_MODE_VN2VN) {
466  dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
467  dev_mc_del(netdev, FIP_ALL_P2P_MACS);
468  } else
469  dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
470 
471  /* Tell the LLD we are done w/ FCoE */
472  ops = netdev->netdev_ops;
473  if (ops->ndo_fcoe_disable) {
474  if (ops->ndo_fcoe_disable(netdev))
475  FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
476  " specific feature for LLD.\n");
477  }
478  fcoe->removed = 1;
479 }
480 
481 
486 static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
487 {
488  struct net_device *netdev = fcoe->netdev;
489  struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
490  struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
491 
492  rtnl_lock();
493  if (!fcoe->removed)
494  fcoe_interface_remove(fcoe);
495  rtnl_unlock();
496 
497  /* Release the self-reference taken during fcoe_interface_create() */
498  /* tear-down the FCoE controller */
499  fcoe_ctlr_destroy(fip);
500  scsi_host_put(fip->lp->host);
501  fcoe_ctlr_device_delete(ctlr_dev);
502  dev_put(netdev);
503  module_put(THIS_MODULE);
504 }
505 
516 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
517  struct packet_type *ptype,
518  struct net_device *orig_dev)
519 {
520  struct fcoe_interface *fcoe;
521  struct fcoe_ctlr *ctlr;
522 
523  fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
524  ctlr = fcoe_to_ctlr(fcoe);
525  fcoe_ctlr_recv(ctlr, skb);
526  return 0;
527 }
528 
534 static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
535 {
536  if (port->fcoe_pending_queue.qlen)
537  fcoe_check_wait_queue(port->lport, skb);
538  else if (fcoe_start_io(skb))
539  fcoe_check_wait_queue(port->lport, skb);
540 }
541 
547 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
548 {
549  skb->dev = fcoe_from_ctlr(fip)->netdev;
550  fcoe_port_send(lport_priv(fip->lp), skb);
551 }
552 
561 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
562 {
563  struct fcoe_port *port = lport_priv(lport);
564  struct fcoe_interface *fcoe = port->priv;
565 
566  if (!is_zero_ether_addr(port->data_src_addr))
567  dev_uc_del(fcoe->netdev, port->data_src_addr);
568  if (!is_zero_ether_addr(addr))
569  dev_uc_add(fcoe->netdev, addr);
570  memcpy(port->data_src_addr, addr, ETH_ALEN);
571 }
572 
577 static u8 *fcoe_get_src_mac(struct fc_lport *lport)
578 {
579  struct fcoe_port *port = lport_priv(lport);
580 
581  return port->data_src_addr;
582 }
583 
590 static int fcoe_lport_config(struct fc_lport *lport)
591 {
592  lport->link_up = 0;
593  lport->qfull = 0;
594  lport->max_retry_count = 3;
595  lport->max_rport_retry_count = 3;
596  lport->e_d_tov = 2 * 1000; /* FC-FS default */
597  lport->r_a_tov = 2 * 2 * 1000;
598  lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
599  FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
600  lport->does_npiv = 1;
601 
602  fc_lport_init_stats(lport);
603 
604  /* lport fc_lport related configuration */
605  fc_lport_config(lport);
606 
607  /* offload related configuration */
608  lport->crc_offload = 0;
609  lport->seq_offload = 0;
610  lport->lro_enabled = 0;
611  lport->lro_xid = 0;
612  lport->lso_max = 0;
613 
614  return 0;
615 }
616 
621 static void fcoe_netdev_features_change(struct fc_lport *lport,
622  struct net_device *netdev)
623 {
624  mutex_lock(&lport->lp_mutex);
625 
626  if (netdev->features & NETIF_F_SG)
627  lport->sg_supp = 1;
628  else
629  lport->sg_supp = 0;
630 
631  if (netdev->features & NETIF_F_FCOE_CRC) {
632  lport->crc_offload = 1;
633  FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
634  } else {
635  lport->crc_offload = 0;
636  }
637 
638  if (netdev->features & NETIF_F_FSO) {
639  lport->seq_offload = 1;
640  lport->lso_max = netdev->gso_max_size;
641  FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
642  lport->lso_max);
643  } else {
644  lport->seq_offload = 0;
645  lport->lso_max = 0;
646  }
647 
648  if (netdev->fcoe_ddp_xid) {
649  lport->lro_enabled = 1;
650  lport->lro_xid = netdev->fcoe_ddp_xid;
651  FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
652  lport->lro_xid);
653  } else {
654  lport->lro_enabled = 0;
655  lport->lro_xid = 0;
656  }
657 
658  mutex_unlock(&lport->lp_mutex);
659 }
660 
670 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
671 {
672  u32 mfs;
673  u64 wwnn, wwpn;
674  struct fcoe_interface *fcoe;
675  struct fcoe_ctlr *ctlr;
676  struct fcoe_port *port;
677 
678  /* Setup lport private data to point to fcoe softc */
679  port = lport_priv(lport);
680  fcoe = port->priv;
681  ctlr = fcoe_to_ctlr(fcoe);
682 
683  /*
684  * Determine max frame size based on underlying device and optional
685  * user-configured limit. If the MFS is too low, fcoe_link_ok()
686  * will return 0, so do this first.
687  */
688  mfs = netdev->mtu;
689  if (netdev->features & NETIF_F_FCOE_MTU) {
690  mfs = FCOE_MTU;
691  FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
692  }
693  mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
694  if (fc_set_mfs(lport, mfs))
695  return -EINVAL;
696 
697  /* offload features support */
698  fcoe_netdev_features_change(lport, netdev);
699 
700  skb_queue_head_init(&port->fcoe_pending_queue);
701  port->fcoe_pending_queue_active = 0;
702  setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
703 
704  fcoe_link_speed_update(lport);
705 
706  if (!lport->vport) {
707  if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
708  wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
709  fc_set_wwnn(lport, wwnn);
710  if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
711  wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
712  2, 0);
713  fc_set_wwpn(lport, wwpn);
714  }
715 
716  return 0;
717 }
718 
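fcoe_netdev_config() derives the FC max frame size by subtracting the FCoE encapsulation overhead from the netdev MTU. A minimal user-space sketch of that arithmetic, assuming the usual 14-byte FCoE header and 8-byte CRC/EOF trailer (the kernel uses the real struct sizes):

#include <stdio.h>

/* Illustrative only: mirrors mfs = mtu - (sizeof(fcoe_hdr) + sizeof(fcoe_crc_eof)).
 * The 14-byte and 8-byte sizes are assumptions stated above, not taken from fcoe.h.
 */
static unsigned int example_mfs_from_mtu(unsigned int mtu)
{
        return mtu - (14 + 8);
}

int main(void)
{
        /* FCOE_MTU (2158) leaves room for a full 2136-byte FC frame */
        printf("mtu 2158 -> mfs %u\n", example_mfs_from_mtu(2158));
        printf("mtu 1500 -> mfs %u\n", example_mfs_from_mtu(1500));
        return 0;
}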
728 static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
729 {
730  int rc = 0;
731 
732  /* lport scsi host config */
733  lport->host->max_lun = FCOE_MAX_LUN;
734  lport->host->max_id = FCOE_MAX_FCP_TARGET;
735  lport->host->max_channel = 0;
736  lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
737 
738  if (lport->vport)
739  lport->host->transportt = fcoe_vport_scsi_transport;
740  else
741  lport->host->transportt = fcoe_nport_scsi_transport;
742 
743  /* add the new host to the SCSI-ml */
744  rc = scsi_add_host(lport->host, dev);
745  if (rc) {
746  FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
747  "error on scsi_add_host\n");
748  return rc;
749  }
750 
751  if (!lport->vport)
752  fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
753 
754  snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
755  "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
756  fcoe_netdev(lport)->name);
757 
758  return 0;
759 }
760 
761 
770 static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
771 {
772  struct fcoe_interface *fcoe;
773  struct fcoe_port *port;
774  struct net_device *realdev;
775  int rc;
776  struct netdev_fcoe_hbainfo fdmi;
777 
778  port = lport_priv(lport);
779  fcoe = port->priv;
780  realdev = fcoe->realdev;
781 
782  if (!realdev)
783  return;
784 
785  /* No FDMI state m/c for NPIV ports */
786  if (lport->vport)
787  return;
788 
789  if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
790  memset(&fdmi, 0, sizeof(fdmi));
791  rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
792  &fdmi);
793  if (rc) {
794  printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
795  "information from netdev.\n");
796  return;
797  }
798 
799  snprintf(fc_host_serial_number(lport->host),
800  FC_SERIAL_NUMBER_SIZE,
801  "%s",
802  fdmi.serial_number);
803  snprintf(fc_host_manufacturer(lport->host),
804  FC_SERIAL_NUMBER_SIZE,
805  "%s",
806  fdmi.manufacturer);
807  snprintf(fc_host_model(lport->host),
808  FC_SYMBOLIC_NAME_SIZE,
809  "%s",
810  fdmi.model);
811  snprintf(fc_host_model_description(lport->host),
812  FC_SYMBOLIC_NAME_SIZE,
813  "%s",
814  fdmi.model_description);
815  snprintf(fc_host_hardware_version(lport->host),
816  FC_VERSION_STRING_SIZE,
817  "%s",
818  fdmi.hardware_version);
819  snprintf(fc_host_driver_version(lport->host),
820  FC_VERSION_STRING_SIZE,
821  "%s",
822  fdmi.driver_version);
823  snprintf(fc_host_optionrom_version(lport->host),
824  FC_VERSION_STRING_SIZE,
825  "%s",
826  fdmi.optionrom_version);
827  snprintf(fc_host_firmware_version(lport->host),
828  FC_VERSION_STRING_SIZE,
829  "%s",
830  fdmi.firmware_version);
831 
832  /* Enable FDMI lport states */
833  lport->fdmi_enabled = 1;
834  } else {
835  lport->fdmi_enabled = 0;
836  printk(KERN_INFO "fcoe: No FDMI support.\n");
837  }
838 }
839 
858 static bool fcoe_oem_match(struct fc_frame *fp)
859 {
860  struct fc_frame_header *fh = fc_frame_header_get(fp);
861  struct fcp_cmnd *fcp;
862 
863  if (fc_fcp_is_read(fr_fsp(fp)) &&
864  (fr_fsp(fp)->data_len > fcoe_ddp_min))
865  return true;
866  else if ((fr_fsp(fp) == NULL) &&
867  (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
868  (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
869  fcp = fc_frame_payload_get(fp, sizeof(*fcp));
870  if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
871  (ntohl(fcp->fc_dl) > fcoe_ddp_min))
872  return true;
873  }
874  return false;
875 }
876 
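fcoe_oem_match() steers large reads, and unsolicited write commands with a large burst length, to the offload exchange manager; anything at or below ddp_min stays on the normal path. A toy sketch of the same threshold decision (names are illustrative, not libfc API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative threshold check only; mirrors the ddp_min module parameter. */
static unsigned int example_ddp_min = 4096;

static bool example_use_ddp(unsigned int io_len)
{
        return io_len > example_ddp_min;
}

int main(void)
{
        printf("512 byte I/O  -> DDP? %d\n", example_use_ddp(512));
        printf("64 KiB I/O    -> DDP? %d\n", example_use_ddp(65536));
        return 0;
}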
883 static inline int fcoe_em_config(struct fc_lport *lport)
884 {
885  struct fcoe_port *port = lport_priv(lport);
886  struct fcoe_interface *fcoe = port->priv;
887  struct fcoe_interface *oldfcoe = NULL;
888  struct net_device *old_real_dev, *cur_real_dev;
889  u16 min_xid = FCOE_MIN_XID;
890  u16 max_xid = FCOE_MAX_XID;
891 
892  /*
893  * Check if need to allocate an em instance for
894  * offload exchange ids to be shared across all VN_PORTs/lport.
895  */
896  if (!lport->lro_enabled || !lport->lro_xid ||
897  (lport->lro_xid >= max_xid)) {
898  lport->lro_xid = 0;
899  goto skip_oem;
900  }
901 
902  /*
903  * Reuse existing offload em instance in case
904  * it is already allocated on real eth device
905  */
906  if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
907  cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
908  else
909  cur_real_dev = fcoe->netdev;
910 
911  list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
912  if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
913  old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
914  else
915  old_real_dev = oldfcoe->netdev;
916 
917  if (cur_real_dev == old_real_dev) {
918  fcoe->oem = oldfcoe->oem;
919  break;
920  }
921  }
922 
923  if (fcoe->oem) {
924  if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
925  printk(KERN_ERR "fcoe_em_config: failed to add "
926  "offload em:%p on interface:%s\n",
927  fcoe->oem, fcoe->netdev->name);
928  return -ENOMEM;
929  }
930  } else {
931  fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
932  FCOE_MIN_XID, lport->lro_xid,
933  fcoe_oem_match);
934  if (!fcoe->oem) {
935  printk(KERN_ERR "fcoe_em_config: failed to allocate "
936  "em for offload exches on interface:%s\n",
937  fcoe->netdev->name);
938  return -ENOMEM;
939  }
940  }
941 
942  /*
943  * Exclude offload EM xid range from next EM xid range.
944  */
945  min_xid += lport->lro_xid + 1;
946 
947 skip_oem:
948  if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
949  printk(KERN_ERR "fcoe_em_config: failed to "
950  "allocate em on interface %s\n", fcoe->netdev->name);
951  return -ENOMEM;
952  }
953 
954  return 0;
955 }
956 
962 static void fcoe_if_destroy(struct fc_lport *lport)
963 {
964  struct fcoe_port *port = lport_priv(lport);
965  struct fcoe_interface *fcoe = port->priv;
966  struct net_device *netdev = fcoe->netdev;
967 
968  FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
969 
970  /* Logout of the fabric */
971  fc_fabric_logoff(lport);
972 
973  /* Cleanup the fc_lport */
974  fc_lport_destroy(lport);
975 
976  /* Stop the transmit retry timer */
977  del_timer_sync(&port->timer);
978 
979  /* Free existing transmit skbs */
980  fcoe_clean_pending_queue(lport);
981 
982  rtnl_lock();
983  if (!is_zero_ether_addr(port->data_src_addr))
984  dev_uc_del(netdev, port->data_src_addr);
985  if (lport->vport)
986  synchronize_net();
987  else
988  fcoe_interface_remove(fcoe);
989  rtnl_unlock();
990 
991  /* Free queued packets for the per-CPU receive threads */
992  fcoe_percpu_clean(lport);
993 
994  /* Detach from the scsi-ml */
995  fc_remove_host(lport->host);
996  scsi_remove_host(lport->host);
997 
998  /* Destroy lport scsi_priv */
999  fc_fcp_destroy(lport);
1000 
1001  /* There are no more rports or I/O, free the EM */
1002  fc_exch_mgr_free(lport);
1003 
1004  /* Free memory used by statistical counters */
1005  fc_lport_free_stats(lport);
1006 
1007  /*
1008  * Release the Scsi_Host for vport but hold on to
1009  * master lport until its fcoe interface is fully cleaned up.
1010  */
1011  if (lport->vport)
1012  scsi_host_put(lport->host);
1013 }
1014 
1024 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
1025  struct scatterlist *sgl, unsigned int sgc)
1026 {
1027  struct net_device *netdev = fcoe_netdev(lport);
1028 
1029  if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
1030  return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
1031  xid, sgl,
1032  sgc);
1033 
1034  return 0;
1035 }
1036 
1046 static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
1047  struct scatterlist *sgl, unsigned int sgc)
1048 {
1049  struct net_device *netdev = fcoe_netdev(lport);
1050 
1051  if (netdev->netdev_ops->ndo_fcoe_ddp_target)
1052  return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
1053  sgl, sgc);
1054 
1055  return 0;
1056 }
1057 
1058 
1066 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
1067 {
1068  struct net_device *netdev = fcoe_netdev(lport);
1069 
1070  if (netdev->netdev_ops->ndo_fcoe_ddp_done)
1071  return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
1072  return 0;
1073 }
1074 
1085 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1086  struct device *parent, int npiv)
1087 {
1088  struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1089  struct net_device *netdev = fcoe->netdev;
1090  struct fc_lport *lport, *n_port;
1091  struct fcoe_port *port;
1092  struct Scsi_Host *shost;
1093  int rc;
1094  /*
1095  * parent is only a vport if npiv is 1,
1096  * but we'll only use vport in that case so go ahead and set it
1097  */
1098  struct fc_vport *vport = dev_to_vport(parent);
1099 
1100  FCOE_NETDEV_DBG(netdev, "Create Interface\n");
1101 
1102  if (!npiv)
1103  lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
1104  else
1105  lport = libfc_vport_create(vport, sizeof(*port));
1106 
1107  if (!lport) {
1108  FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
1109  rc = -ENOMEM;
1110  goto out;
1111  }
1112  port = lport_priv(lport);
1113  port->lport = lport;
1114  port->priv = fcoe;
1115  port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
1116  port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
1117  INIT_WORK(&port->destroy_work, fcoe_destroy_work);
1118 
1119  /* configure a fc_lport including the exchange manager */
1120  rc = fcoe_lport_config(lport);
1121  if (rc) {
1122  FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
1123  "interface\n");
1124  goto out_host_put;
1125  }
1126 
1127  if (npiv) {
1128  FCOE_NETDEV_DBG(netdev, "Setting vport names, "
1129  "%16.16llx %16.16llx\n",
1130  vport->node_name, vport->port_name);
1131  fc_set_wwnn(lport, vport->node_name);
1132  fc_set_wwpn(lport, vport->port_name);
1133  }
1134 
1135  /* configure lport network properties */
1136  rc = fcoe_netdev_config(lport, netdev);
1137  if (rc) {
1138  FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
1139  "interface\n");
1140  goto out_lp_destroy;
1141  }
1142 
1143  /* configure lport scsi host properties */
1144  rc = fcoe_shost_config(lport, parent);
1145  if (rc) {
1146  FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
1147  "interface\n");
1148  goto out_lp_destroy;
1149  }
1150 
1151  /* Initialize the library */
1152  rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
1153  if (rc) {
1154  FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1155  "interface\n");
1156  goto out_lp_destroy;
1157  }
1158 
1159  /* Initialize FDMI information */
1160  fcoe_fdmi_info(lport, netdev);
1161 
1162  /*
1163  * fcoe_em_alloc() and fcoe_hostlist_add() both
1164  * need to be atomic with respect to other changes to the
1165  * hostlist since fcoe_em_alloc() looks for an existing EM
1166  * instance on host list updated by fcoe_hostlist_add().
1167  *
1168  * This is currently handled through the fcoe_config_mutex
1169  * being held.
1170  */
1171  if (!npiv)
1172  /* lport exch manager allocation */
1173  rc = fcoe_em_config(lport);
1174  else {
1175  shost = vport_to_shost(vport);
1176  n_port = shost_priv(shost);
1177  rc = fc_exch_mgr_list_clone(n_port, lport);
1178  }
1179 
1180  if (rc) {
1181  FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
1182  goto out_lp_destroy;
1183  }
1184 
1185  return lport;
1186 
1187 out_lp_destroy:
1188  fc_exch_mgr_free(lport);
1189 out_host_put:
1190  scsi_host_put(lport->host);
1191 out:
1192  return ERR_PTR(rc);
1193 }
1194 
1202 static int __init fcoe_if_init(void)
1203 {
1204  /* attach to scsi transport */
1205  fcoe_nport_scsi_transport =
1206  fc_attach_transport(&fcoe_nport_fc_functions);
1207  fcoe_vport_scsi_transport =
1208  fc_attach_transport(&fcoe_vport_fc_functions);
1209 
1210  if (!fcoe_nport_scsi_transport) {
1211  printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
1212  return -ENODEV;
1213  }
1214 
1215  return 0;
1216 }
1217 
1225 static int __exit fcoe_if_exit(void)
1226 {
1227  fc_release_transport(fcoe_nport_scsi_transport);
1228  fc_release_transport(fcoe_vport_scsi_transport);
1229  fcoe_nport_scsi_transport = NULL;
1230  fcoe_vport_scsi_transport = NULL;
1231  return 0;
1232 }
1233 
1238 static void fcoe_percpu_thread_create(unsigned int cpu)
1239 {
1240  struct fcoe_percpu_s *p;
1241  struct task_struct *thread;
1242 
1243  p = &per_cpu(fcoe_percpu, cpu);
1244 
1245  thread = kthread_create_on_node(fcoe_percpu_receive_thread,
1246  (void *)p, cpu_to_node(cpu),
1247  "fcoethread/%d", cpu);
1248 
1249  if (likely(!IS_ERR(thread))) {
1250  kthread_bind(thread, cpu);
1251  wake_up_process(thread);
1252 
1253  spin_lock_bh(&p->fcoe_rx_list.lock);
1254  p->thread = thread;
1255  spin_unlock_bh(&p->fcoe_rx_list.lock);
1256  }
1257 }
1258 
1267 static void fcoe_percpu_thread_destroy(unsigned int cpu)
1268 {
1269  struct fcoe_percpu_s *p;
1270  struct task_struct *thread;
1271  struct page *crc_eof;
1272  struct sk_buff *skb;
1273 #ifdef CONFIG_SMP
1274  struct fcoe_percpu_s *p0;
1275  unsigned targ_cpu = get_cpu();
1276 #endif /* CONFIG_SMP */
1277 
1278  FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
1279 
1280  /* Prevent any new skbs from being queued for this CPU. */
1281  p = &per_cpu(fcoe_percpu, cpu);
1282  spin_lock_bh(&p->fcoe_rx_list.lock);
1283  thread = p->thread;
1284  p->thread = NULL;
1285  crc_eof = p->crc_eof_page;
1286  p->crc_eof_page = NULL;
1287  p->crc_eof_offset = 0;
1288  spin_unlock_bh(&p->fcoe_rx_list.lock);
1289 
1290 #ifdef CONFIG_SMP
1291  /*
1292  * Don't bother moving the skb's if this context is running
1293  * on the same CPU that is having its thread destroyed. This
1294  * can easily happen when the module is removed.
1295  */
1296  if (cpu != targ_cpu) {
1297  p0 = &per_cpu(fcoe_percpu, targ_cpu);
1298  spin_lock_bh(&p0->fcoe_rx_list.lock);
1299  if (p0->thread) {
1300  FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
1301  cpu, targ_cpu);
1302 
1303  while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1304  __skb_queue_tail(&p0->fcoe_rx_list, skb);
1305  spin_unlock_bh(&p0->fcoe_rx_list.lock);
1306  } else {
1307  /*
1308  * The targeted CPU is not initialized and cannot accept
1309  * new skbs. Unlock the targeted CPU and drop the skbs
1310  * on the CPU that is going offline.
1311  */
1312  while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1313  kfree_skb(skb);
1314  spin_unlock_bh(&p0->fcoe_rx_list.lock);
1315  }
1316  } else {
1317  /*
1318  * This scenario occurs when the module is being removed
1319  * and all threads are being destroyed. skbs will continue
1320  * to be shifted from the CPU thread that is being removed
1321  * to the CPU thread associated with the CPU that is processing
1322  * the module removal. Once there is only one CPU Rx thread it
1323  * will reach this case and we will drop all skbs and later
1324  * stop the thread.
1325  */
1326  spin_lock_bh(&p->fcoe_rx_list.lock);
1327  while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1328  kfree_skb(skb);
1329  spin_unlock_bh(&p->fcoe_rx_list.lock);
1330  }
1331  put_cpu();
1332 #else
1333  /*
1334  * This is a non-SMP scenario where the singular Rx thread is
1335  * being removed. Free all skbs and stop the thread.
1336  */
1337  spin_lock_bh(&p->fcoe_rx_list.lock);
1338  while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1339  kfree_skb(skb);
1340  spin_unlock_bh(&p->fcoe_rx_list.lock);
1341 #endif
1342 
1343  if (thread)
1344  kthread_stop(thread);
1345 
1346  if (crc_eof)
1347  put_page(crc_eof);
1348 }
1349 
1360 static int fcoe_cpu_callback(struct notifier_block *nfb,
1361  unsigned long action, void *hcpu)
1362 {
1363  unsigned cpu = (unsigned long)hcpu;
1364 
1365  switch (action) {
1366  case CPU_ONLINE:
1367  case CPU_ONLINE_FROZEN:
1368  FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
1369  fcoe_percpu_thread_create(cpu);
1370  break;
1371  case CPU_DEAD:
1372  case CPU_DEAD_FROZEN:
1373  FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
1374  fcoe_percpu_thread_destroy(cpu);
1375  break;
1376  default:
1377  break;
1378  }
1379  return NOTIFY_OK;
1380 }
1381 
1391 static inline unsigned int fcoe_select_cpu(void)
1392 {
1393  static unsigned int selected_cpu;
1394 
1395  selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1396  if (selected_cpu >= nr_cpu_ids)
1397  selected_cpu = cpumask_first(cpu_online_mask);
1398 
1399  return selected_cpu;
1400 }
1401 
1414 static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1415  struct packet_type *ptype, struct net_device *olddev)
1416 {
1417  struct fc_lport *lport;
1418  struct fcoe_rcv_info *fr;
1419  struct fcoe_ctlr *ctlr;
1420  struct fcoe_interface *fcoe;
1421  struct fc_frame_header *fh;
1422  struct fcoe_percpu_s *fps;
1423  struct ethhdr *eh;
1424  unsigned int cpu;
1425 
1426  fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1427  ctlr = fcoe_to_ctlr(fcoe);
1428  lport = ctlr->lp;
1429  if (unlikely(!lport)) {
1430  FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1431  goto err2;
1432  }
1433  if (!lport->link_up)
1434  goto err2;
1435 
1436  FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1437  "data:%p tail:%p end:%p sum:%d dev:%s",
1438  skb->len, skb->data_len, skb->head, skb->data,
1439  skb_tail_pointer(skb), skb_end_pointer(skb),
1440  skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1441 
1442  eh = eth_hdr(skb);
1443 
1444  if (is_fip_mode(ctlr) &&
1445  compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
1446  FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1447  eh->h_source);
1448  goto err;
1449  }
1450 
1451  /*
1452  * Check for minimum frame length, and make sure required FCoE
1453  * and FC headers are pulled into the linear data area.
1454  */
1455  if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1456  !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1457  goto err;
1458 
1459  skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1460  fh = (struct fc_frame_header *) skb_transport_header(skb);
1461 
1462  if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
1463  FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
1464  eh->h_dest);
1465  goto err;
1466  }
1467 
1468  fr = fcoe_dev_from_skb(skb);
1469  fr->fr_dev = lport;
1470 
1471  /*
1472  * If the incoming frame's exchange was originated by this initiator,
1473  * the received frame's OX_ID is ANDed with fc_cpu_mask to select the
1474  * same CPU on which the exchange was originated; otherwise the CPU is
1475  * selected from the RX_ID, or via fcoe_select_cpu() when the RX_ID
1476  * is still unknown.
1477  */
1478  if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1479  cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1480  else {
1481  if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
1482  cpu = fcoe_select_cpu();
1483  else
1484  cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1485  }
1486 
1487  if (cpu >= nr_cpu_ids)
1488  goto err;
1489 
1490  fps = &per_cpu(fcoe_percpu, cpu);
1491  spin_lock(&fps->fcoe_rx_list.lock);
1492  if (unlikely(!fps->thread)) {
1493  /*
1494  * The targeted CPU is not ready, let's target
1495  * the first CPU now. For non-SMP systems this
1496  * will check the same CPU twice.
1497  */
1498  FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1499  "ready for incoming skb- using first online "
1500  "CPU.\n");
1501 
1502  spin_unlock(&fps->fcoe_rx_list.lock);
1503  cpu = cpumask_first(cpu_online_mask);
1504  fps = &per_cpu(fcoe_percpu, cpu);
1505  spin_lock(&fps->fcoe_rx_list.lock);
1506  if (!fps->thread) {
1507  spin_unlock(&fps->fcoe_rx_list.lock);
1508  goto err;
1509  }
1510  }
1511 
1512  /*
1513  * We now have a valid CPU that we're targeting for
1514  * this skb. We also have this receive thread locked,
1515  * so we're free to queue skbs into its queue.
1516  */
1517 
1518  /*
1519  * Note: We used to have a set of conditions under which we would
1520  * call fcoe_recv_frame directly, rather than queuing to the rx list
1521  * as it could save a few cycles, but doing so is prohibited, as
1522  * fcoe_recv_frame has several paths that may sleep, which is forbidden
1523  * in softirq context.
1524  */
1525  __skb_queue_tail(&fps->fcoe_rx_list, skb);
1526  if (fps->thread->state == TASK_INTERRUPTIBLE)
1527  wake_up_process(fps->thread);
1528  spin_unlock(&fps->fcoe_rx_list.lock);
1529 
1530  return 0;
1531 err:
1532  per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
1533  put_cpu();
1534 err2:
1535  kfree_skb(skb);
1536  return -1;
1537 }
1538 
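fcoe_rcv() keys CPU selection off the exchange ID so every frame of an exchange is processed by the same per-CPU receive thread. A stand-alone sketch of that mapping (fc_cpu_mask is maintained by libfc; the mask value below is an assumption for illustration):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: with 4 online CPUs, libfc would use a mask of 0x3,
 * so an exchange ID maps to a CPU by masking its low bits.
 */
#define EXAMPLE_FC_CPU_MASK 0x3u

static unsigned int example_cpu_for_xid(uint16_t xid)
{
        return xid & EXAMPLE_FC_CPU_MASK;
}

int main(void)
{
        uint16_t ox_ids[] = { 0x0010, 0x0011, 0x0012, 0x0013 };

        for (unsigned int i = 0; i < 4; i++)
                printf("OX_ID 0x%04x -> CPU %u\n",
                       ox_ids[i], example_cpu_for_xid(ox_ids[i]));
        return 0;
}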
1546 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1547 {
1548  struct fcoe_percpu_s *fps;
1549  int rc;
1550 
1551  fps = &get_cpu_var(fcoe_percpu);
1552  rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
1553  put_cpu_var(fcoe_percpu);
1554 
1555  return rc;
1556 }
1557 
1565 static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1566 {
1567  int wlen;
1568  u32 crc;
1569  struct ethhdr *eh;
1570  struct fcoe_crc_eof *cp;
1571  struct sk_buff *skb;
1572  struct fc_stats *stats;
1573  struct fc_frame_header *fh;
1574  unsigned int hlen; /* header length implies the version */
1575  unsigned int tlen; /* trailer length */
1576  unsigned int elen; /* eth header, may include vlan */
1577  struct fcoe_port *port = lport_priv(lport);
1578  struct fcoe_interface *fcoe = port->priv;
1579  struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1580  u8 sof, eof;
1581  struct fcoe_hdr *hp;
1582 
1583  WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1584 
1585  fh = fc_frame_header_get(fp);
1586  skb = fp_skb(fp);
1587  wlen = skb->len / FCOE_WORD_TO_BYTE;
1588 
1589  if (!lport->link_up) {
1590  kfree_skb(skb);
1591  return 0;
1592  }
1593 
1594  if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1595  fcoe_ctlr_els_send(ctlr, lport, skb))
1596  return 0;
1597 
1598  sof = fr_sof(fp);
1599  eof = fr_eof(fp);
1600 
1601  elen = sizeof(struct ethhdr);
1602  hlen = sizeof(struct fcoe_hdr);
1603  tlen = sizeof(struct fcoe_crc_eof);
1604  wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1605 
1606  /* crc offload */
1607  if (likely(lport->crc_offload)) {
1608  skb->ip_summed = CHECKSUM_PARTIAL;
1609  skb->csum_start = skb_headroom(skb);
1610  skb->csum_offset = skb->len;
1611  crc = 0;
1612  } else {
1613  skb->ip_summed = CHECKSUM_NONE;
1614  crc = fcoe_fc_crc(fp);
1615  }
1616 
1617  /* copy port crc and eof to the skb buff */
1618  if (skb_is_nonlinear(skb)) {
1619  skb_frag_t *frag;
1620  if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
1621  kfree_skb(skb);
1622  return -ENOMEM;
1623  }
1624  frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1625  cp = kmap_atomic(skb_frag_page(frag))
1626  + frag->page_offset;
1627  } else {
1628  cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1629  }
1630 
1631  memset(cp, 0, sizeof(*cp));
1632  cp->fcoe_eof = eof;
1633  cp->fcoe_crc32 = cpu_to_le32(~crc);
1634 
1635  if (skb_is_nonlinear(skb)) {
1636  kunmap_atomic(cp);
1637  cp = NULL;
1638  }
1639 
1640  /* adjust skb network/transport offsets to match mac/fcoe/port */
1641  skb_push(skb, elen + hlen);
1642  skb_reset_mac_header(skb);
1643  skb_reset_network_header(skb);
1644  skb->mac_len = elen;
1645  skb->protocol = htons(ETH_P_FCOE);
1646  skb->priority = fcoe->priority;
1647 
1648  if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1649  fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
1650  skb->vlan_tci = VLAN_TAG_PRESENT |
1651  vlan_dev_vlan_id(fcoe->netdev);
1652  skb->dev = fcoe->realdev;
1653  } else
1654  skb->dev = fcoe->netdev;
1655 
1656  /* fill up mac and fcoe headers */
1657  eh = eth_hdr(skb);
1658  eh->h_proto = htons(ETH_P_FCOE);
1659  memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
1660  if (ctlr->map_dest)
1661  memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1662 
1663  if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
1664  memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
1665  else
1666  memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1667 
1668  hp = (struct fcoe_hdr *)(eh + 1);
1669  memset(hp, 0, sizeof(*hp));
1670  if (FC_FCOE_VER)
1671  FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1672  hp->fcoe_sof = sof;
1673 
1674  /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1675  if (lport->seq_offload && fr_max_payload(fp)) {
1676  skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1677  skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1678  } else {
1679  skb_shinfo(skb)->gso_type = 0;
1680  skb_shinfo(skb)->gso_size = 0;
1681  }
1682  /* update tx stats: regardless if LLD fails */
1683  stats = per_cpu_ptr(lport->stats, get_cpu());
1684  stats->TxFrames++;
1685  stats->TxWords += wlen;
1686  put_cpu();
1687 
1688  /* send down to lld */
1689  fr_dev(fp) = lport;
1690  fcoe_port_send(port, skb);
1691  return 0;
1692 }
1693 
1698 static void fcoe_percpu_flush_done(struct sk_buff *skb)
1699 {
1700  complete(&fcoe_flush_completion);
1701 }
1702 
1710 static inline int fcoe_filter_frames(struct fc_lport *lport,
1711  struct fc_frame *fp)
1712 {
1713  struct fcoe_ctlr *ctlr;
1714  struct fcoe_interface *fcoe;
1715  struct fc_frame_header *fh;
1716  struct sk_buff *skb = (struct sk_buff *)fp;
1717  struct fc_stats *stats;
1718 
1719  /*
1720  * We only check the CRC here if no offload is available and the frame
1721  * is not solicited FCP data, in which case the FCP layer will check it
1722  * during the copy.
1723  */
1724  if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1725  fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1726  else
1727  fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1728 
1729  fh = (struct fc_frame_header *) skb_transport_header(skb);
1730  fh = fc_frame_header_get(fp);
1731  if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
1732  return 0;
1733 
1734  fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1735  ctlr = fcoe_to_ctlr(fcoe);
1736  if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1737  ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1738  FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1739  return -EINVAL;
1740  }
1741 
1742  if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
1743  le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
1744  fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1745  return 0;
1746  }
1747 
1748  stats = per_cpu_ptr(lport->stats, get_cpu());
1749  stats->InvalidCRCCount++;
1750  if (stats->InvalidCRCCount < 5)
1751  printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
1752  put_cpu();
1753  return -EINVAL;
1754 }
1755 
1760 static void fcoe_recv_frame(struct sk_buff *skb)
1761 {
1762  u32 fr_len;
1763  struct fc_lport *lport;
1764  struct fcoe_rcv_info *fr;
1765  struct fc_stats *stats;
1766  struct fcoe_crc_eof crc_eof;
1767  struct fc_frame *fp;
1768  struct fcoe_port *port;
1769  struct fcoe_hdr *hp;
1770 
1771  fr = fcoe_dev_from_skb(skb);
1772  lport = fr->fr_dev;
1773  if (unlikely(!lport)) {
1774  if (skb->destructor != fcoe_percpu_flush_done)
1775  FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1776  kfree_skb(skb);
1777  return;
1778  }
1779 
1780  FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1781  "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1782  skb->len, skb->data_len,
1783  skb->head, skb->data, skb_tail_pointer(skb),
1784  skb_end_pointer(skb), skb->csum,
1785  skb->dev ? skb->dev->name : "<NULL>");
1786 
1787  port = lport_priv(lport);
1788  skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
1789 
1790  /*
1791  * Frame length checks and setting up the header pointers
1792  * was done in fcoe_rcv already.
1793  */
1794  hp = (struct fcoe_hdr *) skb_network_header(skb);
1795 
1796  stats = per_cpu_ptr(lport->stats, get_cpu());
1797  if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1798  if (stats->ErrorFrames < 5)
1799  printk(KERN_WARNING "fcoe: FCoE version "
1800  "mismatch: The frame has "
1801  "version %x, but the "
1802  "initiator supports version "
1803  "%x\n", FC_FCOE_DECAPS_VER(hp),
1804  FC_FCOE_VER);
1805  goto drop;
1806  }
1807 
1808  skb_pull(skb, sizeof(struct fcoe_hdr));
1809  fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1810 
1811  stats->RxFrames++;
1812  stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1813 
1814  fp = (struct fc_frame *)skb;
1815  fc_frame_init(fp);
1816  fr_dev(fp) = lport;
1817  fr_sof(fp) = hp->fcoe_sof;
1818 
1819  /* Copy out the CRC and EOF trailer for access */
1820  if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
1821  goto drop;
1822  fr_eof(fp) = crc_eof.fcoe_eof;
1823  fr_crc(fp) = crc_eof.fcoe_crc32;
1824  if (pskb_trim(skb, fr_len))
1825  goto drop;
1826 
1827  if (!fcoe_filter_frames(lport, fp)) {
1828  put_cpu();
1829  fc_exch_recv(lport, fp);
1830  return;
1831  }
1832 drop:
1833  stats->ErrorFrames++;
1834  put_cpu();
1835  kfree_skb(skb);
1836 }
1837 
1844 static int fcoe_percpu_receive_thread(void *arg)
1845 {
1846  struct fcoe_percpu_s *p = arg;
1847  struct sk_buff *skb;
1848  struct sk_buff_head tmp;
1849 
1850  skb_queue_head_init(&tmp);
1851 
1852  set_user_nice(current, -20);
1853 
1854 retry:
1855  while (!kthread_should_stop()) {
1856 
1857  spin_lock_bh(&p->fcoe_rx_list.lock);
1858  skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
1859 
1860  if (!skb_queue_len(&tmp)) {
1861  set_current_state(TASK_INTERRUPTIBLE);
1862  spin_unlock_bh(&p->fcoe_rx_list.lock);
1863  schedule();
1864  set_current_state(TASK_RUNNING);
1865  goto retry;
1866  }
1867 
1868  spin_unlock_bh(&p->fcoe_rx_list.lock);
1869 
1870  while ((skb = __skb_dequeue(&tmp)) != NULL)
1871  fcoe_recv_frame(skb);
1872 
1873  }
1874  return 0;
1875 }
1876 
1880 static void fcoe_dev_setup(void)
1881 {
1882  register_dcbevent_notifier(&dcb_notifier);
1883  register_netdevice_notifier(&fcoe_notifier);
1884 }
1885 
1889 static void fcoe_dev_cleanup(void)
1890 {
1891  unregister_dcbevent_notifier(&dcb_notifier);
1892  unregister_netdevice_notifier(&fcoe_notifier);
1893 }
1894 
1895 static struct fcoe_interface *
1896 fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
1897 {
1898  struct fcoe_interface *fcoe;
1899  struct net_device *real_dev;
1900 
1901  list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1902  if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
1903  real_dev = vlan_dev_real_dev(fcoe->netdev);
1904  else
1905  real_dev = fcoe->netdev;
1906 
1907  if (netdev == real_dev)
1908  return fcoe;
1909  }
1910  return NULL;
1911 }
1912 
1913 static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1914  ulong event, void *ptr)
1915 {
1916  struct dcb_app_type *entry = ptr;
1917  struct fcoe_ctlr *ctlr;
1918  struct fcoe_interface *fcoe;
1919  struct net_device *netdev;
1920  int prio;
1921 
1922  if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
1923  return NOTIFY_OK;
1924 
1925  netdev = dev_get_by_index(&init_net, entry->ifindex);
1926  if (!netdev)
1927  return NOTIFY_OK;
1928 
1929  fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
1930  dev_put(netdev);
1931  if (!fcoe)
1932  return NOTIFY_OK;
1933 
1934  ctlr = fcoe_to_ctlr(fcoe);
1935 
1936  if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
1937  prio = ffs(entry->app.priority) - 1;
1938  else
1939  prio = entry->app.priority;
1940 
1941  if (prio < 0)
1942  return NOTIFY_OK;
1943 
1944  if (entry->app.protocol == ETH_P_FIP ||
1945  entry->app.protocol == ETH_P_FCOE)
1946  ctlr->priority = prio;
1947 
1948  if (entry->app.protocol == ETH_P_FCOE)
1949  fcoe->priority = prio;
1950 
1951  return NOTIFY_OK;
1952 }
1953 
1964 static int fcoe_device_notification(struct notifier_block *notifier,
1965  ulong event, void *ptr)
1966 {
1967  struct fc_lport *lport = NULL;
1968  struct net_device *netdev = ptr;
1969  struct fcoe_ctlr *ctlr;
1970  struct fcoe_interface *fcoe;
1971  struct fcoe_port *port;
1972  struct fc_stats *stats;
1973  u32 link_possible = 1;
1974  u32 mfs;
1975  int rc = NOTIFY_OK;
1976 
1977  list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1978  if (fcoe->netdev == netdev) {
1979  ctlr = fcoe_to_ctlr(fcoe);
1980  lport = ctlr->lp;
1981  break;
1982  }
1983  }
1984  if (!lport) {
1985  rc = NOTIFY_DONE;
1986  goto out;
1987  }
1988 
1989  switch (event) {
1990  case NETDEV_DOWN:
1991  case NETDEV_GOING_DOWN:
1992  link_possible = 0;
1993  break;
1994  case NETDEV_UP:
1995  case NETDEV_CHANGE:
1996  break;
1997  case NETDEV_CHANGEMTU:
1998  if (netdev->features & NETIF_F_FCOE_MTU)
1999  break;
2000  mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
2001  sizeof(struct fcoe_crc_eof));
2002  if (mfs >= FC_MIN_MAX_FRAME)
2003  fc_set_mfs(lport, mfs);
2004  break;
2005  case NETDEV_REGISTER:
2006  break;
2007  case NETDEV_UNREGISTER:
2008  list_del(&fcoe->list);
2009  port = lport_priv(ctlr->lp);
2010  queue_work(fcoe_wq, &port->destroy_work);
2011  goto out;
2012  break;
2013  case NETDEV_FEAT_CHANGE:
2014  fcoe_netdev_features_change(lport, netdev);
2015  break;
2016  default:
2017  FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
2018  "from netdev netlink\n", event);
2019  }
2020 
2021  fcoe_link_speed_update(lport);
2022 
2023  if (link_possible && !fcoe_link_ok(lport))
2024  fcoe_ctlr_link_up(ctlr);
2025  else if (fcoe_ctlr_link_down(ctlr)) {
2026  stats = per_cpu_ptr(lport->stats, get_cpu());
2027  stats->LinkFailureCount++;
2028  put_cpu();
2029  fcoe_clean_pending_queue(lport);
2030  }
2031 out:
2032  return rc;
2033 }
2034 
2043 static int fcoe_disable(struct net_device *netdev)
2044 {
2045  struct fcoe_ctlr *ctlr;
2046  struct fcoe_interface *fcoe;
2047  int rc = 0;
2048 
2049  mutex_lock(&fcoe_config_mutex);
2050 
2051  rtnl_lock();
2052  fcoe = fcoe_hostlist_lookup_port(netdev);
2053  rtnl_unlock();
2054 
2055  if (fcoe) {
2056  ctlr = fcoe_to_ctlr(fcoe);
2057  fcoe_ctlr_link_down(ctlr);
2058  fcoe_clean_pending_queue(ctlr->lp);
2059  } else
2060  rc = -ENODEV;
2061 
2062  mutex_unlock(&fcoe_config_mutex);
2063  return rc;
2064 }
2065 
2074 static int fcoe_enable(struct net_device *netdev)
2075 {
2076  struct fcoe_ctlr *ctlr;
2077  struct fcoe_interface *fcoe;
2078  int rc = 0;
2079 
2080  mutex_lock(&fcoe_config_mutex);
2081  rtnl_lock();
2082  fcoe = fcoe_hostlist_lookup_port(netdev);
2083  rtnl_unlock();
2084 
2085  if (!fcoe) {
2086  rc = -ENODEV;
2087  goto out;
2088  }
2089 
2090  ctlr = fcoe_to_ctlr(fcoe);
2091 
2092  if (!fcoe_link_ok(ctlr->lp))
2093  fcoe_ctlr_link_up(ctlr);
2094 
2095 out:
2096  mutex_unlock(&fcoe_config_mutex);
2097  return rc;
2098 }
2099 
2108 static int fcoe_destroy(struct net_device *netdev)
2109 {
2110  struct fcoe_ctlr *ctlr;
2111  struct fcoe_interface *fcoe;
2112  struct fc_lport *lport;
2113  struct fcoe_port *port;
2114  int rc = 0;
2115 
2116  mutex_lock(&fcoe_config_mutex);
2117  rtnl_lock();
2118  fcoe = fcoe_hostlist_lookup_port(netdev);
2119  if (!fcoe) {
2120  rc = -ENODEV;
2121  goto out_nodev;
2122  }
2123  ctlr = fcoe_to_ctlr(fcoe);
2124  lport = ctlr->lp;
2125  port = lport_priv(lport);
2126  list_del(&fcoe->list);
2127  queue_work(fcoe_wq, &port->destroy_work);
2128 out_nodev:
2129  rtnl_unlock();
2130  mutex_unlock(&fcoe_config_mutex);
2131  return rc;
2132 }
2133 
2138 static void fcoe_destroy_work(struct work_struct *work)
2139 {
2140  struct fcoe_port *port;
2141  struct fcoe_interface *fcoe;
2142 
2143  port = container_of(work, struct fcoe_port, destroy_work);
2144  mutex_lock(&fcoe_config_mutex);
2145 
2146  fcoe = port->priv;
2147  fcoe_if_destroy(port->lport);
2148  fcoe_interface_cleanup(fcoe);
2149 
2150  mutex_unlock(&fcoe_config_mutex);
2151 }
2152 
2162 static bool fcoe_match(struct net_device *netdev)
2163 {
2164  return true;
2165 }
2166 
2173 static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2174 {
2175 #ifdef CONFIG_DCB
2176  int dcbx;
2177  u8 fup, up;
2178  struct net_device *netdev = fcoe->realdev;
2179  struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2180  struct dcb_app app = {
2181  .priority = 0,
2182  .protocol = ETH_P_FCOE
2183  };
2184 
2185  /* setup DCB priority attributes. */
2186  if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
2187  dcbx = netdev->dcbnl_ops->getdcbx(netdev);
2188 
2189  if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
2190  app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
2191  up = dcb_ieee_getapp_mask(netdev, &app);
2192  app.protocol = ETH_P_FIP;
2193  fup = dcb_ieee_getapp_mask(netdev, &app);
2194  } else {
2195  app.selector = DCB_APP_IDTYPE_ETHTYPE;
2196  up = dcb_getapp(netdev, &app);
2197  app.protocol = ETH_P_FIP;
2198  fup = dcb_getapp(netdev, &app);
2199  }
2200 
2201  fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
2202  ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
2203  }
2204 #endif
2205 }
2206 
2216 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2217 {
2218  int rc = 0;
2219  struct fcoe_ctlr_device *ctlr_dev;
2220  struct fcoe_ctlr *ctlr;
2221  struct fcoe_interface *fcoe;
2222  struct fc_lport *lport;
2223 
2224  mutex_lock(&fcoe_config_mutex);
2225  rtnl_lock();
2226 
2227  /* look for existing lport */
2228  if (fcoe_hostlist_lookup(netdev)) {
2229  rc = -EEXIST;
2230  goto out_nodev;
2231  }
2232 
2233  fcoe = fcoe_interface_create(netdev, fip_mode);
2234  if (IS_ERR(fcoe)) {
2235  rc = PTR_ERR(fcoe);
2236  goto out_nodev;
2237  }
2238 
2239  ctlr = fcoe_to_ctlr(fcoe);
2240  ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
2241  lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
2242  if (IS_ERR(lport)) {
2243  printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2244  netdev->name);
2245  rc = -EIO;
2246  rtnl_unlock();
2247  fcoe_interface_cleanup(fcoe);
2248  goto out_nortnl;
2249  }
2250 
2251  /* Make this the "master" N_Port */
2252  ctlr->lp = lport;
2253 
2254  /* setup DCB priority attributes. */
2255  fcoe_dcb_create(fcoe);
2256 
2257  /* add to lports list */
2258  fcoe_hostlist_add(lport);
2259 
2260  /* start FIP Discovery and FLOGI */
2261  lport->boot_time = jiffies;
2262  fc_fabric_login(lport);
2263  if (!fcoe_link_ok(lport)) {
2264  rtnl_unlock();
2265  fcoe_ctlr_link_up(ctlr);
2266  mutex_unlock(&fcoe_config_mutex);
2267  return rc;
2268  }
2269 
2270 out_nodev:
2271  rtnl_unlock();
2272 out_nortnl:
2273  mutex_unlock(&fcoe_config_mutex);
2274  return rc;
2275 }
2276 
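/**
 * fcoe_link_speed_update() - Update the supported and actual link speeds
 * @lport: The local port to update speeds for
 *
 * Uses the ethtool settings of the underlying net device to fill in
 * lport->link_supported_speeds and lport->link_speed.
 *
 * Returns: 0 if the ethtool query succeeded, -1 otherwise
 */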
2284 static int fcoe_link_speed_update(struct fc_lport *lport)
2285 {
2286  struct net_device *netdev = fcoe_netdev(lport);
2287  struct ethtool_cmd ecmd;
2288 
2289  if (!__ethtool_get_settings(netdev, &ecmd)) {
2290  lport->link_supported_speeds &=
2291  ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2292  if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2293  SUPPORTED_1000baseT_Full))
2294  lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2295  if (ecmd.supported & SUPPORTED_10000baseT_Full)
2296  lport->link_supported_speeds |=
2297  FC_PORTSPEED_10GBIT;
2298  switch (ethtool_cmd_speed(&ecmd)) {
2299  case SPEED_1000:
2300  lport->link_speed = FC_PORTSPEED_1GBIT;
2301  break;
2302  case SPEED_10000:
2303  lport->link_speed = FC_PORTSPEED_10GBIT;
2304  break;
2305  }
2306  return 0;
2307  }
2308  return -1;
2309 }
2310 
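/**
 * fcoe_link_ok() - Check if the link is operationally up
 * @lport: The local port to check
 *
 * Returns: 0 if the underlying net device is operationally up, -1 otherwise
 */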
2318 static int fcoe_link_ok(struct fc_lport *lport)
2319 {
2320  struct net_device *netdev = fcoe_netdev(lport);
2321 
2322  if (netif_oper_up(netdev))
2323  return 0;
2324  return -1;
2325 }
2326 
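/**
 * fcoe_percpu_clean() - Flush queued frames from the per-CPU receive threads
 * @lport: The local port whose frames are to be flushed
 *
 * Queues a zero-length marker skb with a flush-done destructor on each
 * online CPU's receive list and waits for fcoe_flush_completion, so all
 * previously queued frames have been processed before returning.
 */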
2338 static void fcoe_percpu_clean(struct fc_lport *lport)
2339 {
2340  struct fcoe_percpu_s *pp;
2341  struct sk_buff *skb;
2342  unsigned int cpu;
2343 
2344  for_each_possible_cpu(cpu) {
2345  pp = &per_cpu(fcoe_percpu, cpu);
2346 
2347  if (!pp->thread || !cpu_online(cpu))
2348  continue;
2349 
2350  skb = dev_alloc_skb(0);
2351  if (!skb)
2352  continue;
2353 
2354  skb->destructor = fcoe_percpu_flush_done;
2355 
2356  spin_lock_bh(&pp->fcoe_rx_list.lock);
2357  __skb_queue_tail(&pp->fcoe_rx_list, skb);
2358  if (pp->fcoe_rx_list.qlen == 1)
2359  wake_up_process(pp->thread);
2360  spin_unlock_bh(&pp->fcoe_rx_list.lock);
2361 
2362  wait_for_completion(&fcoe_flush_completion);
2363  }
2364 }
2365 
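/**
 * fcoe_reset() - Reset the FCoE controller behind a SCSI host
 * @shost: The SCSI host that is to be reset
 *
 * Takes the FIP controller link down and brings it back up if the
 * underlying net device is still operational.
 *
 * Returns: always 0
 */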
2372 static int fcoe_reset(struct Scsi_Host *shost)
2373 {
2374  struct fc_lport *lport = shost_priv(shost);
2375  struct fcoe_port *port = lport_priv(lport);
2376  struct fcoe_interface *fcoe = port->priv;
2377  struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2378 
2379  fcoe_ctlr_link_down(ctlr);
2380  fcoe_clean_pending_queue(ctlr->lp);
2381  if (!fcoe_link_ok(ctlr->lp))
2382  fcoe_ctlr_link_up(ctlr);
2383  return 0;
2384 }
2385 
2394 static struct fcoe_interface *
2395 fcoe_hostlist_lookup_port(const struct net_device *netdev)
2396 {
2397  struct fcoe_interface *fcoe;
2398 
2399  list_for_each_entry(fcoe, &fcoe_hostlist, list) {
2400  if (fcoe->netdev == netdev)
2401  return fcoe;
2402  }
2403  return NULL;
2404 }
2405 
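/**
 * fcoe_hostlist_lookup() - Find the local port on a given net device
 * @netdev: The net device to look up
 *
 * Must be called with the RTNL mutex held, since it walks fcoe_hostlist.
 *
 * Returns: the corresponding fc_lport, or NULL if none exists
 */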
2415 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2416 {
2417  struct fcoe_ctlr *ctlr;
2418  struct fcoe_interface *fcoe;
2419 
2420  fcoe = fcoe_hostlist_lookup_port(netdev);
2421  ctlr = fcoe ? fcoe_to_ctlr(fcoe) : NULL;
2422  return ctlr ? ctlr->lp : NULL;
2423 }
2424 
2434 static int fcoe_hostlist_add(const struct fc_lport *lport)
2435 {
2436  struct fcoe_interface *fcoe;
2437  struct fcoe_port *port;
2438 
2439  fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
2440  if (!fcoe) {
2441  port = lport_priv(lport);
2442  fcoe = port->priv;
2443  list_add_tail(&fcoe->list, &fcoe_hostlist);
2444  }
2445  return 0;
2446 }
2447 
2448 
2449 static struct fcoe_transport fcoe_sw_transport = {
2450  .name = {FCOE_TRANSPORT_DEFAULT},
2451  .attached = false,
2452  .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
2453  .match = fcoe_match,
2454  .create = fcoe_create,
2455  .destroy = fcoe_destroy,
2456  .enable = fcoe_enable,
2457  .disable = fcoe_disable,
2458 };
2459 
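/**
 * fcoe_init() - Module load entry point
 *
 * Allocates the fcoe workqueue, registers the software FCoE transport
 * with libfcoe, initializes the per-CPU receive threads and hot-CPU
 * notifier, sets up net device link-change notification and performs
 * the remaining one-time setup via fcoe_if_init().
 *
 * Returns: 0 on success, negative errno on failure
 */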
2465 static int __init fcoe_init(void)
2466 {
2467  struct fcoe_percpu_s *p;
2468  unsigned int cpu;
2469  int rc = 0;
2470 
2471  fcoe_wq = alloc_workqueue("fcoe", 0, 0);
2472  if (!fcoe_wq)
2473  return -ENOMEM;
2474 
2475  /* register as a fcoe transport */
2476  rc = fcoe_transport_attach(&fcoe_sw_transport);
2477  if (rc) {
2478  printk(KERN_ERR "failed to register an fcoe transport, check "
2479  "if libfcoe is loaded\n");
2480  return rc;
2481  }
2482 
2483  mutex_lock(&fcoe_config_mutex);
2484 
2485  for_each_possible_cpu(cpu) {
2486  p = &per_cpu(fcoe_percpu, cpu);
2487  skb_queue_head_init(&p->fcoe_rx_list);
2488  }
2489 
2490  for_each_online_cpu(cpu)
2491  fcoe_percpu_thread_create(cpu);
2492 
2493  /* Initialize per CPU interrupt thread */
2494  rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
2495  if (rc)
2496  goto out_free;
2497 
2498  /* Setup link change notification */
2499  fcoe_dev_setup();
2500 
2501  rc = fcoe_if_init();
2502  if (rc)
2503  goto out_free;
2504 
2505  mutex_unlock(&fcoe_config_mutex);
2506  return 0;
2507 
2508 out_free:
2509  for_each_online_cpu(cpu) {
2510  fcoe_percpu_thread_destroy(cpu);
2511  }
2512  mutex_unlock(&fcoe_config_mutex);
2513  destroy_workqueue(fcoe_wq);
2514  return rc;
2515 }
2516 module_init(fcoe_init);
2517 
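/**
 * fcoe_exit() - Module unload entry point
 *
 * Tears down all remaining FCoE interfaces, stops the per-CPU receive
 * threads, flushes and destroys the fcoe workqueue, and detaches from
 * the libfcoe transport.
 */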
2523 static void __exit fcoe_exit(void)
2524 {
2525  struct fcoe_interface *fcoe, *tmp;
2526  struct fcoe_ctlr *ctlr;
2527  struct fcoe_port *port;
2528  unsigned int cpu;
2529 
2530  mutex_lock(&fcoe_config_mutex);
2531 
2532  fcoe_dev_cleanup();
2533 
2534  /* releases the associated fcoe hosts */
2535  rtnl_lock();
2536  list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2537  list_del(&fcoe->list);
2538  ctlr = fcoe_to_ctlr(fcoe);
2539  port = lport_priv(ctlr->lp);
2540  queue_work(fcoe_wq, &port->destroy_work);
2541  }
2542  rtnl_unlock();
2543 
2544  unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2545 
2546  for_each_online_cpu(cpu)
2547  fcoe_percpu_thread_destroy(cpu);
2548 
2549  mutex_unlock(&fcoe_config_mutex);
2550 
2551  /*
2552  * destroy_work items may be chained, but destroy_workqueue()
2553  * can take care of them. Just kill the fcoe_wq.
2554  */
2555  destroy_workqueue(fcoe_wq);
2556 
2557  /*
2558  * Detaching from the scsi transport must happen after all
2559  * destroys are done on the fcoe_wq. destroy_workqueue() will
2560  * ensure the fcoe_wq is flushed.
2561  */
2562  fcoe_if_exit();
2563 
2564  /* detach from fcoe transport */
2565  fcoe_transport_detach(&fcoe_sw_transport);
2566 }
2567 module_exit(fcoe_exit);
2568 
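/**
 * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
 * @seq: The sequence the FLOGI was on
 * @fp: The FLOGI response frame, or an error-encoded pointer
 * @arg: The FCoE controller that sent the FLOGI
 *
 * Records the fabric-granted MAC address (or hands a pre-FIP FLOGI
 * response to the controller) before passing the frame on to libfc's
 * fc_lport_flogi_resp().
 */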
2578 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2579 {
2580  struct fcoe_ctlr *fip = arg;
2581  struct fc_exch *exch = fc_seq_exch(seq);
2582  struct fc_lport *lport = exch->lp;
2583  u8 *mac;
2584 
2585  if (IS_ERR(fp))
2586  goto done;
2587 
2588  mac = fr_cb(fp)->granted_mac;
2589  /* pre-FIP */
2590  if (is_zero_ether_addr(mac))
2591  fcoe_ctlr_recv_flogi(fip, lport, fp);
2592  if (!is_zero_ether_addr(mac))
2593  fcoe_update_src_mac(lport, mac);
2594 done:
2595  fc_lport_flogi_resp(seq, fp, lport);
2596 }
2597 
2607 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2608 {
2609  struct fc_lport *lport = arg;
2610  static u8 zero_mac[ETH_ALEN] = { 0 };
2611 
2612  if (!IS_ERR(fp))
2613  fcoe_update_src_mac(lport, zero_mac);
2614  fc_lport_logo_resp(seq, fp, lport);
2615 }
2616 
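/**
 * fcoe_elsct_send() - FCoE specific ELS/CT request sending hook
 * @lport: The local port sending the request
 * @did: The destination ID
 * @fp: The frame to send
 * @op: The ELS/CT opcode
 * @resp: The caller's response handler
 * @arg: The argument for the response handler
 * @timeout: The timeout period for the request
 *
 * Intercepts FLOGI/FDISC and fabric LOGO so the FIP controller can
 * track the granted MAC address; all other requests pass straight
 * through to fc_elsct_send().
 */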
2626 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2627  struct fc_frame *fp, unsigned int op,
2628  void (*resp)(struct fc_seq *,
2629  struct fc_frame *,
2630  void *),
2631  void *arg, u32 timeout)
2632 {
2633  struct fcoe_port *port = lport_priv(lport);
2634  struct fcoe_interface *fcoe = port->priv;
2635  struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
2636  struct fc_frame_header *fh = fc_frame_header_get(fp);
2637 
2638  switch (op) {
2639  case ELS_FLOGI:
2640  case ELS_FDISC:
2641  if (lport->point_to_multipoint)
2642  break;
2643  return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2644  fip, timeout);
2645  case ELS_LOGO:
2646  /* only hook onto fabric logouts, not port logouts */
2647  if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2648  break;
2649  return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2650  lport, timeout);
2651  }
2652  return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2653 }
2654 
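/**
 * fcoe_vport_create() - Create an NPIV VN_Port on an existing N_Port
 * @vport: The fc_vport being created
 * @disabled: Whether the new VN_Port should start in the disabled state
 *
 * Validates the requested WWPN, creates the child lport and either
 * marks it disabled or starts fabric login on it.
 *
 * Returns: 0 for success, negative errno on failure
 */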
2662 static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2663 {
2664  struct Scsi_Host *shost = vport_to_shost(vport);
2665  struct fc_lport *n_port = shost_priv(shost);
2666  struct fcoe_port *port = lport_priv(n_port);
2667  struct fcoe_interface *fcoe = port->priv;
2668  struct net_device *netdev = fcoe->netdev;
2669  struct fc_lport *vn_port;
2670  int rc;
2671  char buf[32];
2672 
2673  rc = fcoe_validate_vport_create(vport);
2674  if (rc) {
2675  fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
2676  printk(KERN_ERR "fcoe: Failed to create vport, "
2677  "WWPN (0x%s) already exists\n",
2678  buf);
2679  return rc;
2680  }
2681 
2682  mutex_lock(&fcoe_config_mutex);
2683  rtnl_lock();
2684  vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2685  rtnl_unlock();
2686  mutex_unlock(&fcoe_config_mutex);
2687 
2688  if (IS_ERR(vn_port)) {
2689  printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2690  netdev->name);
2691  return -EIO;
2692  }
2693 
2694  if (disabled) {
2695  fc_vport_set_state(vport, FC_VPORT_DISABLED);
2696  } else {
2697  vn_port->boot_time = jiffies;
2698  fc_fabric_login(vn_port);
2699  fc_vport_setlink(vn_port);
2700  }
2701  return 0;
2702 }
2703 
2710 static int fcoe_vport_destroy(struct fc_vport *vport)
2711 {
2712  struct Scsi_Host *shost = vport_to_shost(vport);
2713  struct fc_lport *n_port = shost_priv(shost);
2714  struct fc_lport *vn_port = vport->dd_data;
2715 
2716  mutex_lock(&n_port->lp_mutex);
2717  list_del(&vn_port->list);
2718  mutex_unlock(&n_port->lp_mutex);
2719 
2720  mutex_lock(&fcoe_config_mutex);
2721  fcoe_if_destroy(vn_port);
2722  mutex_unlock(&fcoe_config_mutex);
2723 
2724  return 0;
2725 }
2726 
2732 static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2733 {
2734  struct fc_lport *lport = vport->dd_data;
2735 
2736  if (disable) {
2737  fc_vport_set_state(vport, FC_VPORT_DISABLED);
2738  fc_fabric_logoff(lport);
2739  } else {
2740  lport->boot_time = jiffies;
2741  fc_fabric_login(lport);
2742  fc_vport_setlink(lport);
2743  }
2744 
2745  return 0;
2746 }
2747 
2756 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2757 {
2758  struct fc_lport *lport = vport->dd_data;
2759  struct fc_frame *fp;
2760  size_t len;
2761 
2762  snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2763  "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2764  fcoe_netdev(lport)->name, vport->symbolic_name);
2765 
2766  if (lport->state != LPORT_ST_READY)
2767  return;
2768 
2769  len = strnlen(fc_host_symbolic_name(lport->host), 255);
2770  fp = fc_frame_alloc(lport,
2771  sizeof(struct fc_ct_hdr) +
2772  sizeof(struct fc_ns_rspn) + len);
2773  if (!fp)
2774  return;
2775  lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2776  NULL, NULL, 3 * lport->r_a_tov);
2777 }
2778 
2784 static void fcoe_get_lesb(struct fc_lport *lport,
2785  struct fc_els_lesb *fc_lesb)
2786 {
2787  struct net_device *netdev = fcoe_netdev(lport);
2788 
2789  __fcoe_get_lesb(lport, fc_lesb, netdev);
2790 }
2791 
2792 static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
2793 {
2794  struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2795  struct net_device *netdev = fcoe_netdev(fip->lp);
2796  struct fcoe_fc_els_lesb *fcoe_lesb;
2797  struct fc_els_lesb fc_lesb;
2798 
2799  __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
2800  fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
2801 
2802  ctlr_dev->lesb.lesb_link_fail =
2803  ntohl(fcoe_lesb->lesb_link_fail);
2804  ctlr_dev->lesb.lesb_vlink_fail =
2805  ntohl(fcoe_lesb->lesb_vlink_fail);
2806  ctlr_dev->lesb.lesb_miss_fka =
2807  ntohl(fcoe_lesb->lesb_miss_fka);
2808  ctlr_dev->lesb.lesb_symb_err =
2809  ntohl(fcoe_lesb->lesb_symb_err);
2810  ctlr_dev->lesb.lesb_err_block =
2811  ntohl(fcoe_lesb->lesb_err_block);
2812  ctlr_dev->lesb.lesb_fcs_error =
2813  ntohl(fcoe_lesb->lesb_fcs_error);
2814 }
2815 
2816 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
2817 {
2818  struct fcoe_ctlr_device *ctlr_dev =
2819  fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2820  struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2821  struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
2822 
2823  fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
2824 }
2825 
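/**
 * fcoe_set_port_id() - Callback from libfc when the local port ID changes
 * @lport: The local port
 * @port_id: The newly assigned port ID
 * @fp: The frame (FLOGI accept) that assigned the ID, if any
 *
 * If the ID was assigned by a FLOGI response, hand the frame to the
 * FIP controller so it can record the fabric-granted MAC address.
 */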
2838 static void fcoe_set_port_id(struct fc_lport *lport,
2839  u32 port_id, struct fc_frame *fp)
2840 {
2841  struct fcoe_port *port = lport_priv(lport);
2842  struct fcoe_interface *fcoe = port->priv;
2843  struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2844 
2845  if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2846  fcoe_ctlr_recv_flogi(ctlr, lport, fp);
2847 }