lpfc_hbadisc.c (Linux kernel 3.7.1)
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/slab.h>
25 #include <linux/pci.h>
26 #include <linux/kthread.h>
27 #include <linux/interrupt.h>
28 
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45 #include "lpfc_debugfs.h"
46 
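/*
 * The 126 entries below are the valid FC-AL arbitrated loop physical
 * addresses (AL_PAs) an NL_Port may take. The position of an AL_PA in
 * this table is what gives a device a stable scsi id under the
 * scan-down and ALPA-based bind methods named in the comment below.
 */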
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);

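/*
 * Called by the FC transport to kill outstanding I/O to a remote port,
 * typically while the rport is being torn down: if the node is still
 * mapped as a SCSI target, abort every FCP IOCB queued to that target.
 */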
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}

/*
 * This function is called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {

		/* If the WWPN of the rport and ndlp don't match, ignore it */
		if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
			put_device(&rport->dev);
			return;
		}
	}

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

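/*
 * Worker-thread handler run when an rport's dev_loss_tmo expires: flush
 * any outstanding FCP I/O to the node, drop the ndlp<->rport linkage,
 * and, unless discovery is still pending on the node, remove it through
 * the discovery state machine. On SLI4 hosts the return value reports
 * whether any remote node was still using the FCF when the timer fired.
 */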
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

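/*
 * SLI4-only follow-up to the devloss timeout handler, run after the node
 * reference has been dropped: when the node that just timed out was the
 * last user of the in-use FCF, either unregister the FCF and rescan the
 * table (idle FIP engine) or set HBA_DEVLOSS_TMO so that the in-progress
 * FCF table scan or roundrobin failover performs the unregister instead.
 */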
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

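/*
 * Allocate a fast-path event object from atomic context. The allocation
 * is refused once fast_event_count exceeds LPFC_MAX_EVT_COUNT so that a
 * flood of fast events cannot exhaust memory.
 */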
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

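/*
 * Worker-thread side of fast-path event delivery: pick the payload that
 * matches the event category and subcategory, post it to user space as
 * an FC transport vendor-unique event, then free the event object.
 */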
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		       struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
		fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_fabric_block_timeout_handler(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							HA_RXMASK));
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

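/*
 * Main loop of the lpfc worker thread: sleep until LPFC_DATA_READY is
 * set on phba->data_flags (or the thread is asked to stop), then drain
 * all pending work through lpfc_work_done(). Interrupt handlers and
 * timers feed this thread via lpfc_worker_wake_up().
 */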
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since this device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}


static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

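/*
 * The next few helpers compare one identity field of a driver-cached FCF
 * record against a newly read firmware FCF record (fabric name, switch
 * name, MAC address, VLAN id); together they decide whether two records
 * describe the same FCF.
 */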
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}

static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
				     bf_get(lpfc_fcf_record_fcf_index,
					    new_fcf_record),
				     new_fcf_record);
}

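/*
 * Register the driver's current FCF record with the firmware through the
 * REG_FCFI mailbox command, or, if the FCF is already registered, go
 * straight to issuing the initial FLOGI.
 */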
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}

static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver
	 * connects to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If the FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If the connection record does not support any addressing
		 * mode, skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This FCF record matches the filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, allow the fabric to pick the
		 * addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use it
		 * only if the FCF supports that addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If the matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in the connect list, use the
		 * vlan id in the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}

int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

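/*
 * Accept a candidate with probability 1/fcf_cnt: rand_num is uniform on
 * [0, 0xFFFF], so fcf_cnt * rand_num < 0xFFFF holds for roughly one out
 * of every fcf_cnt candidates. Applied against the running count of
 * eligible records during a table scan, this picks one eligible FCF
 * uniformly at random (reservoir sampling with a sample size of one).
 */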
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

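/*
 * Parse the non-embedded READ_FCF_RECORD mailbox response: check the
 * request status in the SGE payload, swap the response header and the
 * fixed part of the FCF record to host endianness, and hand back a
 * pointer to the record along with the next index to request.
 */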
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}

static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}

int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}

static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;
	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}

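/*
 * Insert an FCF into fcf_pri_list, which is kept sorted by ascending
 * priority value (lower value preferred). Only entries at the current
 * best priority get set in the roundrobin bitmask; discovering a better
 * priority clears the bitmask and restarts the eligible count at one,
 * while an equal priority simply increments the eligible count.
 */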
int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
			       struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3059 adding idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_record->fip_priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
				sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
		    fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
					 &phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					   fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
			   next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;

	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}

2139 void
2140 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2141 {
2142  struct fcf_record *new_fcf_record;
2143  uint32_t boot_flag, addr_mode;
2144  uint16_t fcf_index, next_fcf_index;
2145  struct lpfc_fcf_rec *fcf_rec = NULL;
2146  uint16_t vlan_id;
2147  uint32_t seed;
2148  bool select_new_fcf;
2149  int rc;
2150 
2151  /* If there is pending FCoE event restart FCF table scan */
2152  if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2153  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2154  return;
2155  }
2156 
2157  /* Parse the FCF record from the non-embedded mailbox command */
2158  new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2159  &next_fcf_index);
2160  if (!new_fcf_record) {
2161  lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2162  "2765 Mailbox command READ_FCF_RECORD "
2163  "failed to retrieve a FCF record.\n");
2164  /* Let next new FCF event trigger fast failover */
2165  spin_lock_irq(&phba->hbalock);
2166  phba->hba_flag &= ~FCF_TS_INPROG;
2167  spin_unlock_irq(&phba->hbalock);
2168  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2169  return;
2170  }
2171 
2172  /* Check the FCF record against the connection list */
2173  rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2174  &addr_mode, &vlan_id);
2175 
2176  /* Log the FCF record information if turned on */
2177  lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2178  next_fcf_index);
2179 
2180  /*
2181  * If the fcf record does not match with connect list entries
2182  * read the next entry; otherwise, this is an eligible FCF
2183  * record for roundrobin FCF failover.
2184  */
2185  if (!rc) {
2186  lpfc_sli4_fcf_pri_list_del(phba,
2187  bf_get(lpfc_fcf_record_fcf_index,
2188  new_fcf_record));
2189  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2190  "2781 FCF (x%x) failed connection "
2191  "list check: (x%x/x%x/%x)\n",
2192  bf_get(lpfc_fcf_record_fcf_index,
2193  new_fcf_record),
2194  bf_get(lpfc_fcf_record_fcf_avail,
2195  new_fcf_record),
2196  bf_get(lpfc_fcf_record_fcf_valid,
2197  new_fcf_record),
2198  bf_get(lpfc_fcf_record_fcf_sol,
2199  new_fcf_record));
2200  if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2201  lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2202  new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2203  if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2204  phba->fcf.current_rec.fcf_indx) {
2205  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2206  "2862 FCF (x%x) matches property "
2207  "of in-use FCF (x%x)\n",
2208  bf_get(lpfc_fcf_record_fcf_index,
2209  new_fcf_record),
2210  phba->fcf.current_rec.fcf_indx);
2211  goto read_next_fcf;
2212  }
2213  /*
2214  * In case the current in-use FCF record becomes
2215  * invalid/unavailable during FCF discovery that
2216  * was not triggered by fast FCF failover process,
2217  * treat it as fast FCF failover.
2218  */
2219  if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2220  !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2221  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2222  "2835 Invalid in-use FCF "
2223  "(x%x), enter FCF failover "
2224  "table scan.\n",
2225  phba->fcf.current_rec.fcf_indx);
2226  spin_lock_irq(&phba->hbalock);
2227  phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2228  spin_unlock_irq(&phba->hbalock);
2229  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2230  lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2231  LPFC_FCOE_FCF_GET_FIRST);
2232  return;
2233  }
2234  }
2235  goto read_next_fcf;
2236  } else {
2237  fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2238  rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2239  new_fcf_record);
2240  if (rc)
2241  goto read_next_fcf;
2242  }
2243 
2244  /*
2245  * If this is not the first FCF discovery of the HBA, use last
2246  * FCF record for the discovery. A rescan matches the in-use
2247  * FCF record when the fabric name, switch name, mac address,
2248  * and vlan_id all match.
2249  */
2250  spin_lock_irq(&phba->hbalock);
2251  if (phba->fcf.fcf_flag & FCF_IN_USE) {
2252  if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2253  lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2254  new_fcf_record, vlan_id)) {
2255  if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2256  phba->fcf.current_rec.fcf_indx) {
2257  phba->fcf.fcf_flag |= FCF_AVAILABLE;
2258  if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2259  /* Stop FCF redisc wait timer */
2260  __lpfc_sli4_stop_fcf_redisc_wait_timer(
2261  phba);
2262  else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2263  /* Fast failover, mark completed */
2264  phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2265  spin_unlock_irq(&phba->hbalock);
2266  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2267  "2836 New FCF matches in-use "
2268  "FCF (x%x)\n",
2269  phba->fcf.current_rec.fcf_indx);
2270  goto out;
2271  } else
2272  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2273  "2863 New FCF (x%x) matches "
2274  "property of in-use FCF (x%x)\n",
2275  bf_get(lpfc_fcf_record_fcf_index,
2276  new_fcf_record),
2277  phba->fcf.current_rec.fcf_indx);
2278  }
2279  /*
2280  * Read the next FCF record from the HBA, searching for a
2281  * match with the in-use record, only when not in the fast
2282  * failover period. During the fast failover period, instead
2283  * determine whether the FCF record just read should be the
2284  * next candidate.
2285  */
2286  if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2287  spin_unlock_irq(&phba->hbalock);
2288  goto read_next_fcf;
2289  }
2290  }
2291  /*
2292  * Update on failover FCF record only if it's in FCF fast-failover
2293  * period; otherwise, update on current FCF record.
2294  */
2295  if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2296  fcf_rec = &phba->fcf.failover_rec;
2297  else
2298  fcf_rec = &phba->fcf.current_rec;
2299 
2300  if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2301  /*
2302  * If the driver FCF record does not have boot flag
2303  * set and new hba fcf record has boot flag set, use
2304  * the new hba fcf record.
2305  */
2306  if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2307  /* Choose this FCF record */
2308  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2309  "2837 Update current FCF record "
2310  "(x%x) with new FCF record (x%x)\n",
2311  fcf_rec->fcf_indx,
2312  bf_get(lpfc_fcf_record_fcf_index,
2313  new_fcf_record));
2314  __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2315  addr_mode, vlan_id, BOOT_ENABLE);
2316  spin_unlock_irq(&phba->hbalock);
2317  goto read_next_fcf;
2318  }
2319  /*
2320  * If the driver FCF record has boot flag set and the
2321  * new hba FCF record does not have boot flag, read
2322  * the next FCF record.
2323  */
2324  if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2325  spin_unlock_irq(&phba->hbalock);
2326  goto read_next_fcf;
2327  }
2328  /*
2329  * If the new hba FCF record has lower priority value
2330  * than the driver FCF record, use the new record.
2331  */
2332  if (new_fcf_record->fip_priority < fcf_rec->priority) {
2333  /* Choose the new FCF record with lower priority */
2334  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2335  "2838 Update current FCF record "
2336  "(x%x) with new FCF record (x%x)\n",
2337  fcf_rec->fcf_indx,
2338  bf_get(lpfc_fcf_record_fcf_index,
2339  new_fcf_record));
2340  __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2341  addr_mode, vlan_id, 0);
2342  /* Reset running random FCF selection count */
2343  phba->fcf.eligible_fcf_cnt = 1;
2344  } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2345  /* Update running random FCF selection count */
2346  phba->fcf.eligible_fcf_cnt++;
2347  select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2348  phba->fcf.eligible_fcf_cnt);
2349  if (select_new_fcf) {
2350  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2351  "2839 Update current FCF record "
2352  "(x%x) with new FCF record (x%x)\n",
2353  fcf_rec->fcf_indx,
2354  bf_get(lpfc_fcf_record_fcf_index,
2355  new_fcf_record));
2356  /* Choose the new FCF by random selection */
2357  __lpfc_update_fcf_record(phba, fcf_rec,
2358  new_fcf_record,
2359  addr_mode, vlan_id, 0);
2360  }
2361  }
2362  spin_unlock_irq(&phba->hbalock);
2363  goto read_next_fcf;
2364  }
2365  /*
2366  * This is the first suitable FCF record, choose this record for
2367  * initial best-fit FCF.
2368  */
2369  if (fcf_rec) {
2370  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2371  "2840 Update initial FCF candidate "
2372  "with FCF (x%x)\n",
2373  bf_get(lpfc_fcf_record_fcf_index,
2374  new_fcf_record));
2375  __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2376  addr_mode, vlan_id, (boot_flag ?
2377  BOOT_ENABLE : 0));
2378  phba->fcf.fcf_flag |= FCF_AVAILABLE;
2379  /* Setup initial running random FCF selection count */
2380  phba->fcf.eligible_fcf_cnt = 1;
2381  /* Seeding the random number generator for random selection */
2382  seed = (uint32_t)(0xFFFFFFFF & jiffies);
2383  srandom32(seed);
2384  }
2385  spin_unlock_irq(&phba->hbalock);
2386  goto read_next_fcf;
2387 
2388 read_next_fcf:
2389  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2390  if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2391  if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2392  /*
2393  * Case of FCF fast failover scan
2394  */
2395 
2396  /*
2397  * It has not found any suitable FCF record, cancel
2398  * FCF scan in progress, and do nothing.
2399  */
2400  if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2401  lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2402  "2782 No suitable FCF found: "
2403  "(x%x/x%x)\n",
2404  phba->fcoe_eventtag_at_fcf_scan,
2405  bf_get(lpfc_fcf_record_fcf_index,
2406  new_fcf_record));
2407  spin_lock_irq(&phba->hbalock);
2408  if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2409  phba->hba_flag &= ~FCF_TS_INPROG;
2410  spin_unlock_irq(&phba->hbalock);
2411  /* Unregister in-use FCF and rescan */
2412  lpfc_printf_log(phba, KERN_INFO,
2413  LOG_FIP,
2414  "2864 On devloss tmo "
2415  "unreg in-use FCF and "
2416  "rescan FCF table\n");
2417  lpfc_unregister_fcf_rescan(phba);
2418  return;
2419  }
2420  /*
2421  * Let next new FCF event trigger fast failover
2422  */
2423  phba->hba_flag &= ~FCF_TS_INPROG;
2424  spin_unlock_irq(&phba->hbalock);
2425  return;
2426  }
2427  /*
2428  * It has found a suitable FCF record that is not
2429  * the same as in-use FCF record, unregister the
2430  * in-use FCF record, replace the in-use FCF record
2431  * with the new FCF record, mark FCF fast failover
2432  * completed, and then start registering the new FCF
2433  * record.
2434  */
2435 
2436  /* Unregister the current in-use FCF record */
2437  lpfc_unregister_fcf(phba);
2438 
2439  /* Replace in-use record with the new record */
2440  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2441  "2842 Replace in-use FCF (x%x) "
2442  "with failover FCF (x%x)\n",
2443  phba->fcf.current_rec.fcf_indx,
2444  phba->fcf.failover_rec.fcf_indx);
2445  memcpy(&phba->fcf.current_rec,
2446  &phba->fcf.failover_rec,
2447  sizeof(struct lpfc_fcf_rec));
2448  /*
2449  * Mark the fast FCF failover rediscovery completed
2450  * and the start of the first round of the roundrobin
2451  * FCF failover.
2452  */
2453  spin_lock_irq(&phba->hbalock);
2454  phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2455  spin_unlock_irq(&phba->hbalock);
2456  /* Register to the new FCF record */
2457  lpfc_register_fcf(phba);
2458  } else {
2459  /*
2460  * If in the transaction period to fast FCF failover, do
2461  * nothing when the search reaches the end of the FCF table.
2462  */
2463  if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2464  (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2465  return;
2466 
2467  if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2468  phba->fcf.fcf_flag & FCF_IN_USE) {
2469  /*
2470  * In case the current in-use FCF record no
2471  * longer existed during FCF discovery that
2472  * was not triggered by fast FCF failover
2473  * process, treat it as fast FCF failover.
2474  */
2475  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2476  "2841 In-use FCF record (x%x) "
2477  "not reported, entering fast "
2478  "FCF failover mode scanning.\n",
2479  phba->fcf.current_rec.fcf_indx);
2480  spin_lock_irq(&phba->hbalock);
2481  phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2482  spin_unlock_irq(&phba->hbalock);
2483  lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2484  LPFC_FCOE_FCF_GET_FIRST);
2485  return;
2486  }
2487  /* Register to the new FCF record */
2488  lpfc_register_fcf(phba);
2489  }
2490  } else
2491  lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2492  return;
2493 
2494 out:
2495  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2496  lpfc_register_fcf(phba);
2497 
2498  return;
2499 }
2500 
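2501 /**
2502  * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2503  * @phba: pointer to lpfc hba data structure.
2504  * @mboxq: pointer to mailbox object.
2505  *
2506  * This is the completion handler for the READ_FCF_RECORD mailbox command
2507  * issued during FLOGI-failure roundrobin FCF failover. It parses the FCF
2508  * record returned by the port; an ineligible record has its bit cleared
2509  * from the roundrobin bmask and the next candidate is tried. If the
2510  * record read back is the in-use FCF, FLOGI is retried to the same FCF
2511  * after a 500 ms delay. Otherwise the in-use FCF is unregistered,
2512  * replaced by the new record, and registration to the new FCF starts.
2513  *
2514  * If the link is no longer up, the roundrobin failover is stopped.
2515  **/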
2516 void
2517 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2518 {
2519  struct fcf_record *new_fcf_record;
2520  uint32_t boot_flag, addr_mode;
2521  uint16_t next_fcf_index, fcf_index;
2522  uint16_t current_fcf_index;
2523  uint16_t vlan_id;
2524  int rc;
2525 
2526  /* If link state is not up, stop the roundrobin failover process */
2527  if (phba->link_state < LPFC_LINK_UP) {
2528  spin_lock_irq(&phba->hbalock);
2529  phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2530  phba->hba_flag &= ~FCF_RR_INPROG;
2531  spin_unlock_irq(&phba->hbalock);
2532  goto out;
2533  }
2534 
2535  /* Parse the FCF record from the non-embedded mailbox command */
2536  new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2537  &next_fcf_index);
2538  if (!new_fcf_record) {
2539  lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2540  "2766 Mailbox command READ_FCF_RECORD "
2541  "failed to retrieve a FCF record.\n");
2542  goto error_out;
2543  }
2544 
2545  /* Get the needed parameters from FCF record */
2546  rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2547  &addr_mode, &vlan_id);
2548 
2549  /* Log the FCF record information if turned on */
2550  lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2551  next_fcf_index);
2552 
2553  fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2554  if (!rc) {
2555  lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2556  "2848 Remove ineligible FCF (x%x) "
2557  "from roundrobin bmask\n", fcf_index);
2558  /* Clear roundrobin bmask bit for ineligible FCF */
2559  lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2560  /* Perform next round of roundrobin FCF failover */
2561  fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2562  rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2563  if (rc)
2564  goto out;
2565  goto error_out;
2566  }
2567 
2568  if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2569  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2570  "2760 Perform FLOGI roundrobin FCF failover: "
2571  "FCF (x%x) back to FCF (x%x)\n",
2572  phba->fcf.current_rec.fcf_indx, fcf_index);
2573  /* Wait 500 ms before retrying FLOGI to current FCF */
2574  msleep(500);
2575  lpfc_issue_init_vfi(phba->pport);
2576  goto out;
2577  }
2578 
2579  /* Upload new FCF record to the failover FCF record */
2580  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2581  "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2582  phba->fcf.failover_rec.fcf_indx, fcf_index);
2583  spin_lock_irq(&phba->hbalock);
2584  __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2585  new_fcf_record, addr_mode, vlan_id,
2586  (boot_flag ? BOOT_ENABLE : 0));
2587  spin_unlock_irq(&phba->hbalock);
2588 
2589  current_fcf_index = phba->fcf.current_rec.fcf_indx;
2590 
2591  /* Unregister the current in-use FCF record */
2592  lpfc_unregister_fcf(phba);
2593 
2594  /* Replace in-use record with the new record */
2595  memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2596  sizeof(struct lpfc_fcf_rec));
2597 
2598  lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2599  "2783 Perform FLOGI roundrobin FCF failover: FCF "
2600  "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2601 
2602 error_out:
2603  lpfc_register_fcf(phba);
2604 out:
2605  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2606 }
2607 
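2608 /**
2609  * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2610  * @phba: pointer to lpfc hba data structure.
2611  * @mboxq: pointer to mailbox object.
2612  *
2613  * This is the completion handler for the READ_FCF_RECORD mailbox command
2614  * issued while a new FCF event is handled during the FCF discovery
2615  * period. If the record read back is valid and passes the connection
2616  * list check, it is added to the fcf priority list so it becomes
2617  * eligible for roundrobin FCF failover.
2618  **/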
2619 void
2620 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2621 {
2622  struct fcf_record *new_fcf_record;
2623  uint32_t boot_flag, addr_mode;
2624  uint16_t fcf_index, next_fcf_index;
2625  uint16_t vlan_id;
2626  int rc;
2627 
2628  /* If link state is not up, no need to proceed */
2629  if (phba->link_state < LPFC_LINK_UP)
2630  goto out;
2631 
2632  /* If FCF discovery period is over, no need to proceed */
2633  if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2634  goto out;
2635 
2636  /* Parse the FCF record from the non-embedded mailbox command */
2637  new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2638  &next_fcf_index);
2639  if (!new_fcf_record) {
2640  lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2641  "2767 Mailbox command READ_FCF_RECORD "
2642  "failed to retrieve a FCF record.\n");
2643  goto out;
2644  }
2645 
2646  /* Check the connection list for eligibility */
2647  rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2648  &addr_mode, &vlan_id);
2649 
2650  /* Log the FCF record information if turned on */
2651  lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2652  next_fcf_index);
2653 
2654  if (!rc)
2655  goto out;
2656 
2657  /* Update the eligible FCF record index bmask */
2658  fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2659 
2660  rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2661 
2662 out:
2663  lpfc_sli4_mbox_cmd_free(phba, mboxq);
2664 }
2665 
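2666 /**
2667  * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2668  * @phba: pointer to lpfc hba data structure.
2669  * @mboxq: pointer to mailbox data structure.
2670  *
2671  * This function handles completion of the init vfi mailbox command.
2672  */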
2673 void
2674 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2675 {
2676  struct lpfc_vport *vport = mboxq->vport;
2677 
2678  /*
2679  * VFI not supported on interface type 0, just do the flogi
2680  * Also continue if the VFI is in use - just use the same one.
2681  */
2682  if (mboxq->u.mb.mbxStatus &&
2683  (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2684  LPFC_SLI_INTF_IF_TYPE_0) &&
2685  mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2686  lpfc_printf_vlog(vport, KERN_ERR,
2687  LOG_MBOX,
2688  "2891 Init VFI mailbox failed 0x%x\n",
2689  mboxq->u.mb.mbxStatus);
2690  mempool_free(mboxq, phba->mbox_mem_pool);
2691  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2692  return;
2693  }
2694 
2695  lpfc_initial_flogi(vport);
2696  mempool_free(mboxq, phba->mbox_mem_pool);
2697  return;
2698 }
2699 
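2700 /**
2701  * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2702  * @vport: pointer to lpfc_vport data structure.
2703  *
2704  * This function issues a init_vfi mailbox command to initialize the VFI
2705  * and VPI for the physical port.
2706  */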
2707 void
2708 lpfc_issue_init_vfi(struct lpfc_vport *vport)
2709 {
2710  LPFC_MBOXQ_t *mboxq;
2711  int rc;
2712  struct lpfc_hba *phba = vport->phba;
2713 
2714  mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2715  if (!mboxq) {
2716  lpfc_printf_vlog(vport, KERN_ERR,
2717  LOG_MBOX, "2892 Failed to allocate "
2718  "init_vfi mailbox\n");
2719  return;
2720  }
2721  lpfc_init_vfi(mboxq, vport);
2722  mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2723  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2724  if (rc == MBX_NOT_FINISHED) {
2725  lpfc_printf_vlog(vport, KERN_ERR,
2726  LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2727  mempool_free(mboxq, vport->phba->mbox_mem_pool);
2728  }
2729 }
2730 
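2731 /**
2732  * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2733  * @phba: pointer to lpfc hba data structure.
2734  * @mboxq: pointer to mailbox data structure.
2735  *
2736  * This function handles completion of the init vpi mailbox command.
2737  */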
2738 void
2739 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2740 {
2741  struct lpfc_vport *vport = mboxq->vport;
2742  struct lpfc_nodelist *ndlp;
2743  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2744 
2745  if (mboxq->u.mb.mbxStatus) {
2746  lpfc_printf_vlog(vport, KERN_ERR,
2747  LOG_MBOX,
2748  "2609 Init VPI mailbox failed 0x%x\n",
2749  mboxq->u.mb.mbxStatus);
2750  mempool_free(mboxq, phba->mbox_mem_pool);
2751  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2752  return;
2753  }
2754  spin_lock_irq(shost->host_lock);
2755  vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2756  spin_unlock_irq(shost->host_lock);
2757 
2758  /* If this port is physical port or FDISC is done, do reg_vpi */
2759  if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2760  ndlp = lpfc_findnode_did(vport, Fabric_DID);
2761  if (!ndlp)
2762  lpfc_printf_vlog(vport, KERN_ERR,
2763  LOG_DISCOVERY,
2764  "2731 Cannot find fabric "
2765  "controller node\n");
2766  else
2767  lpfc_register_new_vport(phba, vport, ndlp);
2768  mempool_free(mboxq, phba->mbox_mem_pool);
2769  return;
2770  }
2771 
2772  if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2773  lpfc_initial_fdisc(vport);
2774  else {
2775  lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2776  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2777  "2606 No NPIV Fabric support\n");
2778  }
2779  mempool_free(mboxq, phba->mbox_mem_pool);
2780  return;
2781 }
2782 
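2783 /**
2784  * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2785  * @vport: pointer to lpfc_vport data structure.
2786  *
2787  * This function issues a init_vpi mailbox command to initialize the VPI
2788  * for the vport.
2789  */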
2790 void
2791 lpfc_issue_init_vpi(struct lpfc_vport *vport)
2792 {
2793  LPFC_MBOXQ_t *mboxq;
2794  int rc;
2795 
2796  mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2797  if (!mboxq) {
2798  lpfc_printf_vlog(vport, KERN_ERR,
2799  LOG_MBOX, "2607 Failed to allocate "
2800  "init_vpi mailbox\n");
2801  return;
2802  }
2803  lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2804  mboxq->vport = vport;
2805  mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2806  rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2807  if (rc == MBX_NOT_FINISHED) {
2808  lpfc_printf_vlog(vport, KERN_ERR,
2809  LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2810  mempool_free(mboxq, vport->phba->mbox_mem_pool);
2811  }
2812 }
2813 
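2814 /**
2815  * lpfc_start_fdiscs - send fdiscs for each vport on this port.
2816  * @phba: pointer to lpfc hba data structure.
2817  *
2818  * This function loops through the list of vports on the @phba and issues
2819  * an FDISC if possible.
2820  */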
2821 void
2822 lpfc_start_fdiscs(struct lpfc_hba *phba)
2823 {
2824  struct lpfc_vport **vports;
2825  int i;
2826 
2827  vports = lpfc_create_vport_work_array(phba);
2828  if (vports != NULL) {
2829  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2830  if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2831  continue;
2832  /* There are no vpi for this vport */
2833  if (vports[i]->vpi > phba->max_vpi) {
2834  lpfc_vport_set_state(vports[i],
2835  FC_VPORT_FAILED);
2836  continue;
2837  }
2838  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2839  lpfc_vport_set_state(vports[i],
2840  FC_VPORT_LINKDOWN);
2841  continue;
2842  }
2843  if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2844  lpfc_issue_init_vpi(vports[i]);
2845  continue;
2846  }
2847  if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2848  lpfc_initial_fdisc(vports[i]);
2849  else {
2850  lpfc_vport_set_state(vports[i],
2851  FC_VPORT_NO_FABRIC_SUPP);
2852  lpfc_printf_vlog(vports[i], KERN_ERR,
2853  LOG_ELS,
2854  "0259 No NPIV "
2855  "Fabric support\n");
2856  }
2857  }
2858  }
2859  lpfc_destroy_vport_work_array(phba, vports);
2860 }
2861 
2862 void
2863 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2864 {
2865  struct lpfc_dmabuf *dmabuf = mboxq->context1;
2866  struct lpfc_vport *vport = mboxq->vport;
2867  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2868 
2869  /*
2870  * VFI not supported for interface type 0, so ignore any mailbox
2871  * error (except VFI in use) and continue with the discovery.
2872  */
2873  if (mboxq->u.mb.mbxStatus &&
2874  (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2875  LPFC_SLI_INTF_IF_TYPE_0) &&
2876  mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2877  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2878  "2018 REG_VFI mbxStatus error x%x "
2879  "HBA state x%x\n",
2880  mboxq->u.mb.mbxStatus, vport->port_state);
2881  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2882  /* FLOGI failed, use loop map to make discovery list */
2883  lpfc_disc_list_loopmap(vport);
2884  /* Start discovery */
2885  lpfc_disc_start(vport);
2886  goto out_free_mem;
2887  }
2888  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2889  goto out_free_mem;
2890  }
2891  /* The VPI is implicitly registered when the VFI is registered */
2892  spin_lock_irq(shost->host_lock);
2893  vport->vpi_state |= LPFC_VPI_REGISTERED;
2894  vport->fc_flag |= FC_VFI_REGISTERED;
2895  vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2896  vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2897  spin_unlock_irq(shost->host_lock);
2898 
2899  /* In case SLI4 FC loopback test, we are ready */
2900  if ((phba->sli_rev == LPFC_SLI_REV4) &&
2901  (phba->link_flag & LS_LOOPBACK_MODE)) {
2902  phba->link_state = LPFC_HBA_READY;
2903  goto out_free_mem;
2904  }
2905 
2906  if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2907  /*
2908  * For private loop or for NPort pt2pt,
2909  * just start discovery and we are done.
2910  */
2911  if ((vport->fc_flag & FC_PT2PT) ||
2912  ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2913  !(vport->fc_flag & FC_PUBLIC_LOOP))) {
2914 
2915  /* Use loop map to make discovery list */
2916  lpfc_disc_list_loopmap(vport);
2917  /* Start discovery */
2918  lpfc_disc_start(vport);
2919  } else {
2920  lpfc_start_fdiscs(phba);
2921  lpfc_do_scr_ns_plogi(phba, vport);
2922  }
2923  }
2924 
2925 out_free_mem:
2926  mempool_free(mboxq, phba->mbox_mem_pool);
2927  lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2928  kfree(dmabuf);
2929  return;
2930 }
2931 
2932 static void
2933 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2934 {
2935  MAILBOX_t *mb = &pmb->u.mb;
2936  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
2937  struct lpfc_vport *vport = pmb->vport;
2938 
2939 
2940  /* Check for error */
2941  if (mb->mbxStatus) {
2942  /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
2943  lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2944  "0319 READ_SPARAM mbxStatus error x%x "
2945  "hba state x%x>\n",
2946  mb->mbxStatus, vport->port_state);
2947  lpfc_linkdown(phba);
2948  goto out;
2949  }
2950 
2951  memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
2952  sizeof (struct serv_parm));
2953  lpfc_update_vport_wwn(vport);
2954  if (vport->port_type == LPFC_PHYSICAL_PORT) {
2955  memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
2956  memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
2957  }
2958 
2959  lpfc_mbuf_free(phba, mp->virt, mp->phys);
2960  kfree(mp);
2961  mempool_free(pmb, phba->mbox_mem_pool);
2962  return;
2963 
2964 out:
2965  pmb->context1 = NULL;
2966  lpfc_mbuf_free(phba, mp->virt, mp->phys);
2967  kfree(mp);
2968  lpfc_issue_clear_la(phba, vport);
2969  mempool_free(pmb, phba->mbox_mem_pool);
2970  return;
2971 }
2972 
2973 static void
2974 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2975 {
2976  struct lpfc_vport *vport = phba->pport;
2977  LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
2978  struct Scsi_Host *shost;
2979  int i;
2980  struct lpfc_dmabuf *mp;
2981  int rc;
2982  struct fcf_record *fcf_record;
2983 
2984  spin_lock_irq(&phba->hbalock);
2985  switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
2986  case LPFC_LINK_SPEED_1GHZ:
2987  case LPFC_LINK_SPEED_2GHZ:
2988  case LPFC_LINK_SPEED_4GHZ:
2989  case LPFC_LINK_SPEED_8GHZ:
2990  case LPFC_LINK_SPEED_10GHZ:
2991  case LPFC_LINK_SPEED_16GHZ:
2992  phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
2993  break;
2994  default:
2995  phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
2996  break;
2997  }
2998 
2999  phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3000  phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3001 
3002  shost = lpfc_shost_from_vport(vport);
3003  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3004  phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3005 
3006  /* if npiv is enabled and this adapter supports npiv log
3007  * a message that npiv is not supported in this topology
3008  */
3009  if (phba->cfg_enable_npiv && phba->max_vpi)
3010  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3011  "1309 Link Up Event npiv not supported in loop "
3012  "topology\n");
3013  /* Get Loop Map information */
3014  if (bf_get(lpfc_mbx_read_top_il, la)) {
3015  spin_lock(shost->host_lock);
3016  vport->fc_flag |= FC_LBIT;
3017  spin_unlock(shost->host_lock);
3018  }
3019 
3020  vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3021  i = la->lilpBde64.tus.f.bdeSize;
3022 
3023  if (i == 0) {
3024  phba->alpa_map[0] = 0;
3025  } else {
3026  if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3027  int numalpa, j, k;
3028  union {
3029  uint8_t pamap[16];
3030  struct {
3031  uint32_t wd1;
3032  uint32_t wd2;
3033  uint32_t wd3;
3034  uint32_t wd4;
3035  } pa;
3036  } un;
3037  numalpa = phba->alpa_map[0];
3038  j = 0;
3039  while (j < numalpa) {
3040  memset(un.pamap, 0, 16);
3041  for (k = 1; j < numalpa; k++) {
3042  un.pamap[k - 1] =
3043  phba->alpa_map[j + 1];
3044  j++;
3045  if (k == 16)
3046  break;
3047  }
3048  /* Link Up Event ALPA map */
3049  lpfc_printf_log(phba,
3050  KERN_WARNING,
3051  LOG_LINK_EVENT,
3052  "1304 Link Up Event "
3053  "ALPA map Data: x%x "
3054  "x%x x%x x%x\n",
3055  un.pa.wd1, un.pa.wd2,
3056  un.pa.wd3, un.pa.wd4);
3057  }
3058  }
3059  }
3060  } else {
3061  if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3062  if (phba->max_vpi && phba->cfg_enable_npiv &&
3063  (phba->sli_rev >= LPFC_SLI_REV3))
3064  phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3065  }
3066  vport->fc_myDID = phba->fc_pref_DID;
3067  spin_lock(shost->host_lock);
3068  vport->fc_flag |= FC_LBIT;
3069  spin_unlock(shost->host_lock);
3070  }
3071  spin_unlock_irq(&phba->hbalock);
3072 
3073  lpfc_linkup(phba);
3074  sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3075  if (!sparam_mbox)
3076  goto out;
3077 
3078  rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3079  if (rc) {
3080  mempool_free(sparam_mbox, phba->mbox_mem_pool);
3081  goto out;
3082  }
3083  sparam_mbox->vport = vport;
3084  sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3085  rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3086  if (rc == MBX_NOT_FINISHED) {
3087  mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
3088  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3089  kfree(mp);
3090  mempool_free(sparam_mbox, phba->mbox_mem_pool);
3091  goto out;
3092  }
3093 
3094  if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3095  cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3096  if (!cfglink_mbox)
3097  goto out;
3098  vport->port_state = LPFC_LOCAL_CFG_LINK;
3099  lpfc_config_link(phba, cfglink_mbox);
3100  cfglink_mbox->vport = vport;
3101  cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3102  rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3103  if (rc == MBX_NOT_FINISHED) {
3104  mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3105  goto out;
3106  }
3107  } else {
3108  vport->port_state = LPFC_VPORT_UNKNOWN;
3109  /*
3110  * Add the driver's default FCF record at FCF index 0 now. This
3111  * is a phase 1 implementation that supports FCF index 0 and driver
3112  * defaults.
3113  */
3114  if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3115  fcf_record = kzalloc(sizeof(struct fcf_record),
3116  GFP_KERNEL);
3117  if (unlikely(!fcf_record)) {
3118  lpfc_printf_log(phba, KERN_ERR,
3119  LOG_MBOX | LOG_SLI,
3120  "2554 Could not allocate memory for "
3121  "fcf record\n");
3122  rc = -ENODEV;
3123  goto out;
3124  }
3125 
3126  lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3127  LPFC_FCOE_FCF_DEF_INDEX);
3128  rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3129  if (unlikely(rc)) {
3130  lpfc_printf_log(phba, KERN_ERR,
3131  LOG_MBOX | LOG_SLI,
3132  "2013 Could not manually add FCF "
3133  "record 0, status %d\n", rc);
3134  rc = -ENODEV;
3135  kfree(fcf_record);
3136  goto out;
3137  }
3138  kfree(fcf_record);
3139  }
3140  /*
3141  * The driver is expected to do FIP/FCF. Call the port
3142  * and get the FCF Table.
3143  */
3144  spin_lock_irq(&phba->hbalock);
3145  if (phba->hba_flag & FCF_TS_INPROG) {
3146  spin_unlock_irq(&phba->hbalock);
3147  return;
3148  }
3149  /* This is the initial FCF discovery scan */
3150  phba->fcf.fcf_flag |= FCF_INIT_DISC;
3151  spin_unlock_irq(&phba->hbalock);
3152  lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3153  "2778 Start FCF table scan at linkup\n");
3154  rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3155  LPFC_FCOE_FCF_GET_FIRST);
3156  if (rc) {
3157  spin_lock_irq(&phba->hbalock);
3158  phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3159  spin_unlock_irq(&phba->hbalock);
3160  goto out;
3161  }
3162  /* Reset FCF roundrobin bmask for new discovery */
3163  lpfc_sli4_clear_fcf_rr_bmask(phba);
3164  }
3165 
3166  return;
3167 out:
3168  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3169  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3170  "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3171  vport->port_state, sparam_mbox, cfglink_mbox);
3172  lpfc_issue_clear_la(phba, vport);
3173  return;
3174 }
3175 
3176 static void
3177 lpfc_enable_la(struct lpfc_hba *phba)
3178 {
3179  uint32_t control;
3180  struct lpfc_sli *psli = &phba->sli;
3181  spin_lock_irq(&phba->hbalock);
3182  psli->sli_flag |= LPFC_PROCESS_LA;
3183  if (phba->sli_rev <= LPFC_SLI_REV3) {
3184  control = readl(phba->HCregaddr);
3185  control |= HC_LAINT_ENA;
3186  writel(control, phba->HCregaddr);
3187  readl(phba->HCregaddr); /* flush */
3188  }
3189  spin_unlock_irq(&phba->hbalock);
3190 }
3191 
3192 static void
3193 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3194 {
3195  lpfc_linkdown(phba);
3196  lpfc_enable_la(phba);
3197  lpfc_unreg_all_rpis(phba->pport);
3198  /* turn on Link Attention interrupts - no CLEAR_LA needed */
3199 }
3200 
3201 
3202 /*
3203  * This routine handles processing a READ_TOPOLOGY mailbox
3204  * command upon completion. It is setup in the LPFC_MBOXQ
3205  * as the completion routine when the command is
3206  * handed off to the SLI layer.
3207  */
3208 void
3209 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3210 {
3211  struct lpfc_vport *vport = pmb->vport;
3212  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3213  struct lpfc_mbx_read_top *la;
3214  MAILBOX_t *mb = &pmb->u.mb;
3215  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3216 
3217  /* Unblock ELS traffic */
3218  phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
3219  /* Check for error */
3220  if (mb->mbxStatus) {
3221  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3222  "1307 READ_LA mbox error x%x state x%x\n",
3223  mb->mbxStatus, vport->port_state);
3224  lpfc_mbx_issue_link_down(phba);
3225  phba->link_state = LPFC_HBA_ERROR;
3226  goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3227  }
3228 
3229  la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3230 
3231  memcpy(&phba->alpa_map[0], mp->virt, 128);
3232 
3233  spin_lock_irq(shost->host_lock);
3234  if (bf_get(lpfc_mbx_read_top_pb, la))
3235  vport->fc_flag |= FC_BYPASSED_MODE;
3236  else
3237  vport->fc_flag &= ~FC_BYPASSED_MODE;
3238  spin_unlock_irq(shost->host_lock);
3239 
3240  if ((phba->fc_eventTag < la->eventTag) ||
3241  (phba->fc_eventTag == la->eventTag)) {
3242  phba->fc_stat.LinkMultiEvent++;
3243  if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
3244  if (phba->fc_eventTag != 0)
3245  lpfc_linkdown(phba);
3246  }
3247 
3248  phba->fc_eventTag = la->eventTag;
3249  spin_lock_irq(&phba->hbalock);
3250  if (bf_get(lpfc_mbx_read_top_mm, la))
3251  phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3252  else
3253  phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3254  spin_unlock_irq(&phba->hbalock);
3255 
3256  phba->link_events++;
3257  if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
3258  (!bf_get(lpfc_mbx_read_top_mm, la))) {
3259  phba->fc_stat.LinkUp++;
3260  if (phba->link_flag & LS_LOOPBACK_MODE) {
3261  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3262  "1306 Link Up Event in loop back mode "
3263  "x%x received Data: x%x x%x x%x x%x\n",
3264  la->eventTag, phba->fc_eventTag,
3265  bf_get(lpfc_mbx_read_top_alpa_granted,
3266  la),
3267  bf_get(lpfc_mbx_read_top_link_spd, la),
3268  phba->alpa_map[0]);
3269  } else {
3270  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3271  "1303 Link Up Event x%x received "
3272  "Data: x%x x%x x%x x%x x%x x%x %d\n",
3273  la->eventTag, phba->fc_eventTag,
3274  bf_get(lpfc_mbx_read_top_alpa_granted,
3275  la),
3276  bf_get(lpfc_mbx_read_top_link_spd, la),
3277  phba->alpa_map[0],
3278  bf_get(lpfc_mbx_read_top_mm, la),
3279  bf_get(lpfc_mbx_read_top_fa, la),
3280  phba->wait_4_mlo_maint_flg);
3281  }
3282  lpfc_mbx_process_link_up(phba, la);
3283  } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
3284  LPFC_ATT_LINK_DOWN) {
3285  phba->fc_stat.LinkDown++;
3286  if (phba->link_flag & LS_LOOPBACK_MODE)
3287  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3288  "1308 Link Down Event in loop back mode "
3289  "x%x received "
3290  "Data: x%x x%x x%x\n",
3291  la->eventTag, phba->fc_eventTag,
3292  phba->pport->port_state, vport->fc_flag);
3293  else
3294  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3295  "1305 Link Down Event x%x received "
3296  "Data: x%x x%x x%x x%x x%x\n",
3297  la->eventTag, phba->fc_eventTag,
3298  phba->pport->port_state, vport->fc_flag,
3299  bf_get(lpfc_mbx_read_top_mm, la),
3300  bf_get(lpfc_mbx_read_top_fa, la));
3301  lpfc_mbx_issue_link_down(phba);
3302  }
3303  if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
3304  (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
3305  if (phba->link_state != LPFC_LINK_DOWN) {
3306  phba->fc_stat.LinkDown++;
3307  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3308  "1312 Link Down Event x%x received "
3309  "Data: x%x x%x x%x\n",
3310  la->eventTag, phba->fc_eventTag,
3311  phba->pport->port_state, vport->fc_flag);
3312  lpfc_mbx_issue_link_down(phba);
3313  } else
3314  lpfc_enable_la(phba);
3315 
3316  lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3317  "1310 Menlo Maint Mode Link up Event x%x rcvd "
3318  "Data: x%x x%x x%x\n",
3319  la->eventTag, phba->fc_eventTag,
3320  phba->pport->port_state, vport->fc_flag);
3321  /*
3322  * The cmnd that triggered this will be waiting for this
3323  * signal.
3324  */
3325  /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3326  if (phba->wait_4_mlo_maint_flg) {
3327  phba->wait_4_mlo_maint_flg = 0;
3328  wake_up_interruptible(&phba->wait_4_mlo_m_q);
3329  }
3330  }
3331 
3332  if (bf_get(lpfc_mbx_read_top_fa, la)) {
3333  if (bf_get(lpfc_mbx_read_top_mm, la))
3334  lpfc_issue_clear_la(phba, vport);
3335  lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3336  "1311 fa %d\n",
3337  bf_get(lpfc_mbx_read_top_fa, la));
3338  }
3339 
3340 lpfc_mbx_cmpl_read_topology_free_mbuf:
3341  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3342  kfree(mp);
3343  mempool_free(pmb, phba->mbox_mem_pool);
3344  return;
3345 }
3346 
3347 /*
3348  * This routine handles processing a REG_LOGIN mailbox
3349  * command upon completion. It is setup in the LPFC_MBOXQ
3350  * as the completion routine when the command is
3351  * handed off to the SLI layer.
3352  */
3353 void
3354 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3355 {
3356  struct lpfc_vport *vport = pmb->vport;
3357  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3358  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3359  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3360 
3361  pmb->context1 = NULL;
3362  pmb->context2 = NULL;
3363 
3364  if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3365  ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3366 
3367  if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3368  ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3369  /* We rcvd a rscn after issuing this
3370  * mbox reg login, we may have cycled
3371  * back through the state and be
3372  * back at reg login state so this
3373  * mbox needs to be ignored because
3374  * there is another reg login in
3375  * process.
3376  */
3377  spin_lock_irq(shost->host_lock);
3378  ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3379  spin_unlock_irq(shost->host_lock);
3380  } else
3381  /* Good status, call state machine */
3382  lpfc_disc_state_machine(vport, ndlp, pmb,
3383  NLP_EVT_CMPL_REG_LOGIN);
3384 
3385  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3386  kfree(mp);
3387  mempool_free(pmb, phba->mbox_mem_pool);
3388  /* decrement the node reference count held for this callback
3389  * function.
3390  */
3391  lpfc_nlp_put(ndlp);
3392 
3393  return;
3394 }
3395 
3396 static void
3397 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3398 {
3399  MAILBOX_t *mb = &pmb->u.mb;
3400  struct lpfc_vport *vport = pmb->vport;
3401  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3402 
3403  switch (mb->mbxStatus) {
3404  case 0x0011:
3405  case 0x0020:
3406  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3407  "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3408  mb->mbxStatus);
3409  break;
3410  /* If VPI is busy, reset the HBA */
3411  case 0x9700:
3412  lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3413  "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3414  vport->vpi, mb->mbxStatus);
3415  if (!(phba->pport->load_flag & FC_UNLOADING))
3416  lpfc_workq_post_event(phba, NULL, NULL,
3417  LPFC_EVT_RESET_HBA);
3418  }
3419  spin_lock_irq(shost->host_lock);
3420  vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3421  vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3422  spin_unlock_irq(shost->host_lock);
3423  vport->unreg_vpi_cmpl = VPORT_OK;
3424  mempool_free(pmb, phba->mbox_mem_pool);
3425  lpfc_cleanup_vports_rrqs(vport, NULL);
3426  /*
3427  * This shost reference might have been taken at the beginning of
3428  * lpfc_vport_delete()
3429  */
3430  if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3431  scsi_host_put(shost);
3432 }
3433 
3434 int
3435 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3436 {
3437  struct lpfc_hba *phba = vport->phba;
3438  LPFC_MBOXQ_t *mbox;
3439  int rc;
3440 
3441  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3442  if (!mbox)
3443  return 1;
3444 
3445  lpfc_unreg_vpi(phba, vport->vpi, mbox);
3446  mbox->vport = vport;
3447  mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3448  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3449  if (rc == MBX_NOT_FINISHED) {
3450  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3451  "1800 Could not issue unreg_vpi\n");
3452  mempool_free(mbox, phba->mbox_mem_pool);
3453  vport->unreg_vpi_cmpl = VPORT_ERROR;
3454  return rc;
3455  }
3456  return 0;
3457 }
3458 
3459 static void
3460 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3461 {
3462  struct lpfc_vport *vport = pmb->vport;
3463  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3464  MAILBOX_t *mb = &pmb->u.mb;
3465 
3466  switch (mb->mbxStatus) {
3467  case 0x0011:
3468  case 0x9601:
3469  case 0x9602:
3470  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3471  "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3472  mb->mbxStatus);
3473  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3474  spin_lock_irq(shost->host_lock);
3475  vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3476  spin_unlock_irq(shost->host_lock);
3477  vport->fc_myDID = 0;
3478  goto out;
3479  }
3480 
3481  spin_lock_irq(shost->host_lock);
3482  vport->vpi_state |= LPFC_VPI_REGISTERED;
3483  vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3484  spin_unlock_irq(shost->host_lock);
3485  vport->num_disc_nodes = 0;
3486  /* go thru NPR list and issue ELS PLOGIs */
3487  if (vport->fc_npr_cnt)
3488  lpfc_els_disc_plogi(vport);
3489 
3490  if (!vport->num_disc_nodes) {
3491  spin_lock_irq(shost->host_lock);
3492  vport->fc_flag &= ~FC_NDISC_ACTIVE;
3493  spin_unlock_irq(shost->host_lock);
3494  lpfc_can_disctmo(vport);
3495  }
3496  vport->port_state = LPFC_VPORT_READY;
3497 
3498 out:
3499  mempool_free(pmb, phba->mbox_mem_pool);
3500  return;
3501 }
3502 
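3503 /**
3504  * lpfc_create_static_vport - Read HBA config region to create static vports.
3505  * @phba: pointer to lpfc hba data structure.
3506  *
3507  * This routine issues a DUMP mailbox command to read the static vport
3508  * information from the HBA and then creates the vports described by
3509  * the returned static_vport_info records.
3510  */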
3511 void
3512 lpfc_create_static_vport(struct lpfc_hba *phba)
3513 {
3514  LPFC_MBOXQ_t *pmb = NULL;
3515  MAILBOX_t *mb;
3516  struct static_vport_info *vport_info;
3517  int mbx_wait_rc = 0, i;
3518  struct fc_vport_identifiers vport_id;
3519  struct fc_vport *new_fc_vport;
3520  struct Scsi_Host *shost;
3521  struct lpfc_vport *vport;
3522  uint16_t offset = 0;
3523  uint8_t *vport_buff;
3524  struct lpfc_dmabuf *mp;
3525  uint32_t byte_count = 0;
3526 
3527  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3528  if (!pmb) {
3529  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3530  "0542 lpfc_create_static_vport failed to"
3531  " allocate mailbox memory\n");
3532  return;
3533  }
3534  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3535  mb = &pmb->u.mb;
3536 
3537  vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3538  if (!vport_info) {
3539  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3540  "0543 lpfc_create_static_vport failed to"
3541  " allocate vport_info\n");
3542  mempool_free(pmb, phba->mbox_mem_pool);
3543  return;
3544  }
3545 
3546  vport_buff = (uint8_t *) vport_info;
3547  do {
3548  /* free dma buffer from previous round */
3549  if (pmb->context1) {
3550  mp = (struct lpfc_dmabuf *)pmb->context1;
3551  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3552  kfree(mp);
3553  }
3554  if (lpfc_dump_static_vport(phba, pmb, offset))
3555  goto out;
3556 
3557  pmb->vport = phba->pport;
3558  mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3559  LPFC_MBOX_TMO);
3560 
3561  if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3562  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3563  "0544 lpfc_create_static_vport failed to"
3564  " issue dump mailbox command ret 0x%x "
3565  "status 0x%x\n",
3566  mbx_wait_rc, mb->mbxStatus);
3567  goto out;
3568  }
3569 
3570  if (phba->sli_rev == LPFC_SLI_REV4) {
3571  byte_count = pmb->u.mqe.un.mb_words[5];
3572  mp = (struct lpfc_dmabuf *)pmb->context1;
3573  if (byte_count > sizeof(struct static_vport_info) -
3574  offset)
3575  byte_count = sizeof(struct static_vport_info)
3576  - offset;
3577  memcpy(vport_buff + offset, mp->virt, byte_count);
3578  offset += byte_count;
3579  } else {
3580  if (mb->un.varDmp.word_cnt >
3581  sizeof(struct static_vport_info) - offset)
3582  mb->un.varDmp.word_cnt =
3583  sizeof(struct static_vport_info)
3584  - offset;
3585  byte_count = mb->un.varDmp.word_cnt;
3586  lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3587  vport_buff + offset,
3588  byte_count);
3589 
3590  offset += byte_count;
3591  }
3592 
3593  } while (byte_count &&
3594  offset < sizeof(struct static_vport_info));
3595 
3596 
3597  if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3598  ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3599  != VPORT_INFO_REV)) {
3600  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3601  "0545 lpfc_create_static_vport bad"
3602  " information header 0x%x 0x%x\n",
3603  le32_to_cpu(vport_info->signature),
3604  le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
3605 
3606  goto out;
3607  }
3608 
3609  shost = lpfc_shost_from_vport(phba->pport);
3610 
3611  for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3612  memset(&vport_id, 0, sizeof(vport_id));
3613  vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3614  vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3615  if (!vport_id.port_name || !vport_id.node_name)
3616  continue;
3617 
3618  vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3619  vport_id.vport_type = FC_PORTTYPE_NPIV;
3620  vport_id.disable = false;
3621  new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3622 
3623  if (!new_fc_vport) {
3624  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3625  "0546 lpfc_create_static_vport failed to"
3626  " create vport\n");
3627  continue;
3628  }
3629 
3630  vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3631  vport->vport_flag |= STATIC_VPORT;
3632  }
3633 
3634 out:
3635  kfree(vport_info);
3636  if (mbx_wait_rc != MBX_TIMEOUT) {
3637  if (pmb->context1) {
3638  mp = (struct lpfc_dmabuf *)pmb->context1;
3639  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3640  kfree(mp);
3641  }
3642  mempool_free(pmb, phba->mbox_mem_pool);
3643  }
3644 
3645  return;
3646 }
3647 
3648 /*
3649  * This routine handles processing a Fabric REG_LOGIN mailbox
3650  * command upon completion. It is setup in the LPFC_MBOXQ
3651  * as the completion routine when the command is
3652  * handed off to the SLI layer.
3653  */
3654 void
3655 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3656 {
3657  struct lpfc_vport *vport = pmb->vport;
3658  MAILBOX_t *mb = &pmb->u.mb;
3659  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3660  struct lpfc_nodelist *ndlp;
3661  struct Scsi_Host *shost;
3662 
3663  ndlp = (struct lpfc_nodelist *) pmb->context2;
3664  pmb->context1 = NULL;
3665  pmb->context2 = NULL;
3666 
3667  if (mb->mbxStatus) {
3668  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3669  "0258 Register Fabric login error: 0x%x\n",
3670  mb->mbxStatus);
3671  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3672  kfree(mp);
3673  mempool_free(pmb, phba->mbox_mem_pool);
3674 
3675  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3676  /* FLOGI failed, use loop map to make discovery list */
3677  lpfc_disc_list_loopmap(vport);
3678 
3679  /* Start discovery */
3680  lpfc_disc_start(vport);
3681  /* Decrement the reference count to ndlp after the
3682  * references to the ndlp are done.
3683  */
3684  lpfc_nlp_put(ndlp);
3685  return;
3686  }
3687 
3688  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3689  /* Decrement the reference count to ndlp after the
3690  * references to the ndlp are done.
3691  */
3692  lpfc_nlp_put(ndlp);
3693  return;
3694  }
3695 
3696  if (phba->sli_rev < LPFC_SLI_REV4)
3697  ndlp->nlp_rpi = mb->un.varWords[0];
3698  ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3699  ndlp->nlp_type |= NLP_FABRIC;
3700  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3701 
3702  if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3703  /* when the physical port receives a logo, do not
3704  * start vport discovery */
3705  if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3706  lpfc_start_fdiscs(phba);
3707  else {
3708  shost = lpfc_shost_from_vport(vport);
3709  spin_lock_irq(shost->host_lock);
3710  vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
3711  spin_unlock_irq(shost->host_lock);
3712  }
3713  lpfc_do_scr_ns_plogi(phba, vport);
3714  }
3715 
3716  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3717  kfree(mp);
3718  mempool_free(pmb, phba->mbox_mem_pool);
3719 
3720  /* Drop the reference count from the mbox at the end after
3721  * all the current references to the ndlp have been done.
3722  */
3723  lpfc_nlp_put(ndlp);
3724  return;
3725 }
3726 
3727 /*
3728  * This routine handles processing a NameServer REG_LOGIN mailbox
3729  * command upon completion. It is setup in the LPFC_MBOXQ
3730  * as the completion routine when the command is
3731  * handed off to the SLI layer.
3732  */
3733 void
3734 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3735 {
3736  MAILBOX_t *mb = &pmb->u.mb;
3737  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3738  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3739  struct lpfc_vport *vport = pmb->vport;
3740 
3741  pmb->context1 = NULL;
3742  pmb->context2 = NULL;
3743 
3744  if (mb->mbxStatus) {
3745 out:
3746  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3747  "0260 Register NameServer error: 0x%x\n",
3748  mb->mbxStatus);
3749  /* decrement the node reference count held for this
3750  * callback function.
3751  */
3752  lpfc_nlp_put(ndlp);
3753  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3754  kfree(mp);
3755  mempool_free(pmb, phba->mbox_mem_pool);
3756 
3757  /* If no other thread is using the ndlp, free it */
3758  lpfc_nlp_not_used(ndlp);
3759 
3760  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3761  /*
3762  * RegLogin failed, use loop map to make discovery
3763  * list
3764  */
3765  lpfc_disc_list_loopmap(vport);
3766 
3767  /* Start discovery */
3768  lpfc_disc_start(vport);
3769  return;
3770  }
3771  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3772  return;
3773  }
3774 
3775  if (phba->sli_rev < LPFC_SLI_REV4)
3776  ndlp->nlp_rpi = mb->un.varWords[0];
3777  ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3778  ndlp->nlp_type |= NLP_FABRIC;
3779  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3780 
3781  if (vport->port_state < LPFC_VPORT_READY) {
3782  /* Link up discovery requires Fabric registration. */
3783  lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
3784  lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
3785  lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
3786  lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3787  lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
3788 
3789  /* Issue SCR just before NameServer GID_FT Query */
3790  lpfc_issue_els_scr(vport, SCR_DID, 0);
3791  }
3792 
3793  vport->fc_ns_retry = 0;
3794  /* Good status, issue CT Request to NameServer */
3795  if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
3796  /* Cannot issue NameServer Query, so finish up discovery */
3797  goto out;
3798  }
3799 
3800  /* decrement the node reference count held for this
3801  * callback function.
3802  */
3803  lpfc_nlp_put(ndlp);
3804  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3805  kfree(mp);
3806  mempool_free(pmb, phba->mbox_mem_pool);
3807 
3808  return;
3809 }
3810 
3811 static void
3812 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3813 {
3814  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3815  struct fc_rport *rport;
3816  struct lpfc_rport_data *rdata;
3817  struct fc_rport_identifiers rport_ids;
3818  struct lpfc_hba *phba = vport->phba;
3819 
3820  /* Remote port has reappeared. Re-register w/ FC transport */
3821  rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
3822  rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3823  rport_ids.port_id = ndlp->nlp_DID;
3824  rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3825 
3826  /*
3827  * We leave our node pointer in rport->dd_data when we unregister a
3828  * FCP target port. But fc_remote_port_add zeros the space to which
3829  * rport->dd_data points. So, if we're reusing a previously
3830  * registered port, drop the reference that we took the last time we
3831  * registered the port.
3832  */
3833  if (ndlp->rport && ndlp->rport->dd_data &&
3834  ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
3835  lpfc_nlp_put(ndlp);
3836 
3837  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
3838  "rport add: did:x%x flg:x%x type x%x",
3839  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3840 
3841  /* Don't add the remote port if unloading. */
3842  if (vport->load_flag & FC_UNLOADING)
3843  return;
3844 
3845  ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
3846  if (!rport || !get_device(&rport->dev)) {
3847  dev_printk(KERN_WARNING, &phba->pcidev->dev,
3848  "Warning: fc_remote_port_add failed\n");
3849  return;
3850  }
3851 
3852  /* initialize static port data */
3853  rport->maxframe_size = ndlp->nlp_maxframe;
3854  rport->supported_classes = ndlp->nlp_class_sup;
3855  rdata = rport->dd_data;
3856  rdata->pnode = lpfc_nlp_get(ndlp);
3857 
3858  if (ndlp->nlp_type & NLP_FCP_TARGET)
3859  rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
3860  if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3861  rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3862 
3863  if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3864  fc_remote_port_rolechg(rport, rport_ids.roles);
3865 
3866  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3867  "3183 rport register x%06x, rport %p role x%x\n",
3868  ndlp->nlp_DID, rport, rport_ids.roles);
3869 
3870  if ((rport->scsi_target_id != -1) &&
3871  (rport->scsi_target_id < LPFC_MAX_TARGET)) {
3872  ndlp->nlp_sid = rport->scsi_target_id;
3873  }
3874  return;
3875 }
3876 
3877 static void
3878 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
3879 {
3880  struct fc_rport *rport = ndlp->rport;
3881 
3882  lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
3883  "rport delete: did:x%x flg:x%x type x%x",
3884  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3885 
3886  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3887  "3184 rport unregister x%06x, rport %p\n",
3888  ndlp->nlp_DID, rport);
3889 
3890  fc_remote_port_delete(rport);
3891 
3892  return;
3893 }
3894 
3895 static void
3896 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
3897 {
3898  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3899 
3900  spin_lock_irq(shost->host_lock);
3901  switch (state) {
3902  case NLP_STE_UNUSED_NODE:
3903  vport->fc_unused_cnt += count;
3904  break;
3905  case NLP_STE_PLOGI_ISSUE:
3906  vport->fc_plogi_cnt += count;
3907  break;
3908  case NLP_STE_ADISC_ISSUE:
3909  vport->fc_adisc_cnt += count;
3910  break;
3911  case NLP_STE_REG_LOGIN_ISSUE:
3912  vport->fc_reglogin_cnt += count;
3913  break;
3914  case NLP_STE_PRLI_ISSUE:
3915  vport->fc_prli_cnt += count;
3916  break;
3917  case NLP_STE_UNMAPPED_NODE:
3918  vport->fc_unmap_cnt += count;
3919  break;
3920  case NLP_STE_MAPPED_NODE:
3921  vport->fc_map_cnt += count;
3922  break;
3923  case NLP_STE_NPR_NODE:
3924  vport->fc_npr_cnt += count;
3925  break;
3926  }
3927  spin_unlock_irq(shost->host_lock);
3928 }
3929 
3930 static void
3931 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3932  int old_state, int new_state)
3933 {
3934  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3935 
3936  if (new_state == NLP_STE_UNMAPPED_NODE) {
3937  ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3938  ndlp->nlp_type |= NLP_FC_NODE;
3939  }
3940  if (new_state == NLP_STE_MAPPED_NODE)
3941  ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3942  if (new_state == NLP_STE_NPR_NODE)
3943  ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
3944 
3945  /* Transport interface */
3946  if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
3947  old_state == NLP_STE_UNMAPPED_NODE)) {
3948  vport->phba->nport_event_cnt++;
3949  lpfc_unregister_remote_port(ndlp);
3950  }
3951 
3952  if (new_state == NLP_STE_MAPPED_NODE ||
3953  new_state == NLP_STE_UNMAPPED_NODE) {
3954  vport->phba->nport_event_cnt++;
3955  /*
3956  * Tell the fc transport about the port, if we haven't
3957  * already. If we have, and it's a scsi entity, be
3958  * sure to unblock any attached scsi devices
3959  */
3960  lpfc_register_remote_port(vport, ndlp);
3961  }
3962  if ((new_state == NLP_STE_MAPPED_NODE) &&
3963  (vport->stat_data_enabled)) {
3964  /*
3965  * A new target is discovered; if there is no buffer for
3966  * statistical data collection, allocate one.
3967  */
3968  ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
3969  sizeof(struct lpfc_scsicmd_bkt),
3970  GFP_KERNEL);
3971 
3972  if (!ndlp->lat_data)
3973  lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3974  "0286 lpfc_nlp_state_cleanup failed to "
3975  "allocate statistical data buffer DID "
3976  "0x%x\n", ndlp->nlp_DID);
3977  }
3978  /*
3979  * if we added to Mapped list, but the remote port
3980  * registration failed or assigned a target id outside
3981  * our presentable range - move the node to the
3982  * Unmapped List
3983  */
3984  if (new_state == NLP_STE_MAPPED_NODE &&
3985  (!ndlp->rport ||
3986  ndlp->rport->scsi_target_id == -1 ||
3987  ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
3988  spin_lock_irq(shost->host_lock);
3989  ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
3990  spin_unlock_irq(shost->host_lock);
3991  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3992  }
3993 }
3994 
3995 static char *
3996 lpfc_nlp_state_name(char *buffer, size_t size, int state)
3997 {
3998  static char *states[] = {
3999  [NLP_STE_UNUSED_NODE] = "UNUSED",
4000  [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4001  [NLP_STE_ADISC_ISSUE] = "ADISC",
4002  [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4003  [NLP_STE_PRLI_ISSUE] = "PRLI",
4004  [NLP_STE_LOGO_ISSUE] = "LOGO",
4005  [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4006  [NLP_STE_MAPPED_NODE] = "MAPPED",
4007  [NLP_STE_NPR_NODE] = "NPR",
4008  };
4009 
4010  if (state < NLP_STE_MAX_STATE && states[state])
4011  strlcpy(buffer, states[state], size);
4012  else
4013  snprintf(buffer, size, "unknown (%d)", state);
4014  return buffer;
4015 }
4016 
4017 void
4018 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4019  int state)
4020 {
4021  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4022  int old_state = ndlp->nlp_state;
4023  char name1[16], name2[16];
4024 
4025  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4026  "0904 NPort state transition x%06x, %s -> %s\n",
4027  ndlp->nlp_DID,
4028  lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4029  lpfc_nlp_state_name(name2, sizeof(name2), state));
4030 
4031  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4032  "node statechg did:x%x old:%d ste:%d",
4033  ndlp->nlp_DID, old_state, state);
4034 
4035  if (old_state == NLP_STE_NPR_NODE &&
4036  state != NLP_STE_NPR_NODE)
4037  lpfc_cancel_retry_delay_tmo(vport, ndlp);
4038  if (old_state == NLP_STE_UNMAPPED_NODE) {
4039  ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4040  ndlp->nlp_type &= ~NLP_FC_NODE;
4041  }
4042 
4043  if (list_empty(&ndlp->nlp_listp)) {
4044  spin_lock_irq(shost->host_lock);
4045  list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4046  spin_unlock_irq(shost->host_lock);
4047  } else if (old_state)
4048  lpfc_nlp_counters(vport, old_state, -1);
4049 
4050  ndlp->nlp_state = state;
4051  lpfc_nlp_counters(vport, state, 1);
4052  lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4053 }
4054 
4055 void
4056 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4057 {
4058  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4059 
4060  if (list_empty(&ndlp->nlp_listp)) {
4061  spin_lock_irq(shost->host_lock);
4062  list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4063  spin_unlock_irq(shost->host_lock);
4064  }
4065 }
4066 
4067 void
4068 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4069 {
4070  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4071 
4072  lpfc_cancel_retry_delay_tmo(vport, ndlp);
4073  if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4074  lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4075  spin_lock_irq(shost->host_lock);
4076  list_del_init(&ndlp->nlp_listp);
4077  spin_unlock_irq(shost->host_lock);
4078  lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4079  NLP_STE_UNUSED_NODE);
4080 }
4081 
4082 static void
4083 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4084 {
4085  lpfc_cancel_retry_delay_tmo(vport, ndlp);
4086  if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4087  lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4088  lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4089  NLP_STE_UNUSED_NODE);
4090 }
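4091 /**
4092  * lpfc_initialize_node - Initialize all fields of node object
4093  * @vport: Pointer to Virtual Port object.
4094  * @ndlp: Pointer to FC node object.
4095  * @did: FC_ID of the node.
4096  *
4097  * This function is always called when a node object needs to be
4098  * initialized; it initializes all fields of the node object. Although
4099  * the reference to phba from @ndlp can be obtained indirectly through
4100  * its reference to @vport, a direct reference to phba is taken here
4101  * by @ndlp because the lifetime of the ndlp is governed by its
4102  * reference count, so the vport pointer cannot be guaranteed valid
4103  * at the time of ndlp deletion.
4104  */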
4105 static inline void
4106 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4107  uint32_t did)
4108 {
4109  INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4110  INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4111  init_timer(&ndlp->nlp_delayfunc);
4112  ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
4113  ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4114  ndlp->nlp_DID = did;
4115  ndlp->vport = vport;
4116  ndlp->phba = vport->phba;
4117  ndlp->nlp_sid = NLP_NO_SID;
4118  kref_init(&ndlp->kref);
4119  NLP_INT_NODE_ACT(ndlp);
4120  atomic_set(&ndlp->cmd_pending, 0);
4121  ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4122  if (vport->phba->sli_rev == LPFC_SLI_REV4)
4123  ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
4124 }
4125 
4126 struct lpfc_nodelist *
4127 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4128  int state)
4129 {
4130  struct lpfc_hba *phba = vport->phba;
4131  uint32_t did;
4132  unsigned long flags;
4133 
4134  if (!ndlp)
4135  return NULL;
4136 
4137  spin_lock_irqsave(&phba->ndlp_lock, flags);
4138  /* The ndlp should not be in memory free mode */
4139  if (NLP_CHK_FREE_REQ(ndlp)) {
4140  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4141  lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4142  "0277 lpfc_enable_node: ndlp:x%p "
4143  "usgmap:x%x refcnt:%d\n",
4144  (void *)ndlp, ndlp->nlp_usg_map,
4145  atomic_read(&ndlp->kref.refcount));
4146  return NULL;
4147  }
4148  /* The ndlp should not already be in active mode */
4149  if (NLP_CHK_NODE_ACT(ndlp)) {
4150  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4151  lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4152  "0278 lpfc_enable_node: ndlp:x%p "
4153  "usgmap:x%x refcnt:%d\n",
4154  (void *)ndlp, ndlp->nlp_usg_map,
4155  atomic_read(&ndlp->kref.refcount));
4156  return NULL;
4157  }
4158 
4159  /* Keep the original DID */
4160  did = ndlp->nlp_DID;
4161 
4162  /* re-initialize ndlp except for the linked list pointer */
4163  memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4164  sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4165  lpfc_initialize_node(vport, ndlp, did);
4166 
4167  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4168 
4169  if (state != NLP_STE_UNUSED_NODE)
4170  lpfc_nlp_set_state(vport, ndlp, state);
4171 
4172  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4173  "node enable: did:x%x",
4174  ndlp->nlp_DID, 0, 0);
4175  return ndlp;
4176 }
4177 
4178 void
4179 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4180 {
4181  /*
4182  * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
4183  * be used if we wish to issue the "last" lpfc_nlp_put() to remove
4184  * the ndlp from the vport. The ndlp is marked as UNUSED on the list
4185  * until ALL other outstanding threads have completed. We check
4186  * that the ndlp is not already in the UNUSED state before we proceed.
4187  */
4188  if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4189  return;
4190  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4191  if (vport->phba->sli_rev == LPFC_SLI_REV4)
4192  lpfc_cleanup_vports_rrqs(vport, ndlp);
4193  lpfc_nlp_put(ndlp);
4194  return;
4195 }
4196 
4197 /*
4198  * Start / ReStart rescue timer for Discovery / RSCN handling
4199  */
4200 void
4201 lpfc_set_disctmo(struct lpfc_vport *vport)
4202 {
4203  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4204  struct lpfc_hba *phba = vport->phba;
4205  uint32_t tmo;
4206 
4207  if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4208  /* For FAN, timeout should be greater than edtov */
4209  tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4210  } else {
4211  /* Normal discovery timeout should be greater than the ELS/CT timeout.
4212  * FC spec states we need 3 * ratov for CT requests
4213  */
4214  tmo = ((phba->fc_ratov * 3) + 3);
4215  }
4216 
4217 
4218  if (!timer_pending(&vport->fc_disctmo)) {
4219  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4220  "set disc timer: tmo:x%x state:x%x flg:x%x",
4221  tmo, vport->port_state, vport->fc_flag);
4222  }
4223 
4224  mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
4225  spin_lock_irq(shost->host_lock);
4226  vport->fc_flag |= FC_DISC_TMO;
4227  spin_unlock_irq(shost->host_lock);
4228 
4229  /* Start Discovery Timer state <hba_state> */
4230  lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4231  "0247 Start Discovery Timer state x%x "
4232  "Data: x%x x%lx x%x x%x\n",
4233  vport->port_state, tmo,
4234  (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4235  vport->fc_adisc_cnt);
4236 
4237  return;
4238 }
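/* Worked example of the timeout math above, assuming the common
 * defaults of E_D_TOV = 2000 ms and R_A_TOV = 10 seconds: the FAN case
 * gives tmo = ((2000 + 999) / 1000) + 1 = 3 seconds, and the normal
 * case gives tmo = (10 * 3) + 3 = 33 seconds, satisfying the
 * "3 * R_A_TOV for CT requests" rule with margin.
 */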
4239 
4240 /*
4241  * Cancel rescue timer for Discovery / RSCN handling
4242  */
4243 int
4244 lpfc_can_disctmo(struct lpfc_vport *vport)
4245 {
4246  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4247  unsigned long iflags;
4248 
4249  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4250  "can disc timer: state:x%x rtry:x%x flg:x%x",
4251  vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4252 
4253  /* Turn off discovery timer if it's running */
4254  if (vport->fc_flag & FC_DISC_TMO) {
4255  spin_lock_irqsave(shost->host_lock, iflags);
4256  vport->fc_flag &= ~FC_DISC_TMO;
4257  spin_unlock_irqrestore(shost->host_lock, iflags);
4258  del_timer_sync(&vport->fc_disctmo);
4259  spin_lock_irqsave(&vport->work_port_lock, iflags);
4260  vport->work_port_events &= ~WORKER_DISC_TMO;
4261  spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4262  }
4263 
4264  /* Cancel Discovery Timer state <hba_state> */
4265  lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4266  "0248 Cancel Discovery Timer state x%x "
4267  "Data: x%x x%x x%x\n",
4268  vport->port_state, vport->fc_flag,
4269  vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4270  return 0;
4271 }
4272 
4273 /*
4274  * Check specified ring for outstanding IOCB on the SLI queue
4275  * Return true if iocb matches the specified nport
4276  */
4277 int
4278 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4279  struct lpfc_sli_ring *pring,
4280  struct lpfc_iocbq *iocb,
4281  struct lpfc_nodelist *ndlp)
4282 {
4283  struct lpfc_sli *psli = &phba->sli;
4284  IOCB_t *icmd = &iocb->iocb;
4285  struct lpfc_vport *vport = ndlp->vport;
4286 
4287  if (iocb->vport != vport)
4288  return 0;
4289 
4290  if (pring->ringno == LPFC_ELS_RING) {
4291  switch (icmd->ulpCommand) {
4292  case CMD_GEN_REQUEST64_CR:
4293  if (iocb->context_un.ndlp == ndlp)
4294  return 1; /* else fall through */
4295  case CMD_ELS_REQUEST64_CR:
4296  if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4297  return 1; /* else fall through */
4298  case CMD_XMIT_ELS_RSP64_CX:
4299  if (iocb->context1 == (uint8_t *) ndlp)
4300  return 1;
4301  }
4302  } else if (pring->ringno == psli->extra_ring) {
4303 
4304  } else if (pring->ringno == psli->fcp_ring) {
4305  /* Skip match check if waiting to relogin to FCP target */
4306  if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4307  (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4308  return 0;
4309  }
4310  if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4311  return 1;
4312  }
4313  } else if (pring->ringno == psli->next_ring) {
4314 
4315  }
4316  return 0;
4317 }
4318 
4319 /*
4320  * Free resources / clean up outstanding I/Os
4321  * associated with nlp_rpi in the LPFC_NODELIST entry.
4322  */
4323 static int
4324 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4325 {
4326  LIST_HEAD(completions);
4327  struct lpfc_sli *psli;
4328  struct lpfc_sli_ring *pring;
4329  struct lpfc_iocbq *iocb, *next_iocb;
4330  uint32_t i;
4331 
4332  lpfc_fabric_abort_nport(ndlp);
4333 
4334  /*
4335  * Everything that matches on txcmplq will be returned
4336  * by firmware with a no rpi error.
4337  */
4338  psli = &phba->sli;
4339  if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4340  /* Now process each ring */
4341  for (i = 0; i < psli->num_rings; i++) {
4342  pring = &psli->ring[i];
4343 
4344  spin_lock_irq(&phba->hbalock);
4345  list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
4346  list) {
4347  /*
4348  * Check to see if iocb matches the nport we are
4349  * looking for
4350  */
4351  if ((lpfc_check_sli_ndlp(phba, pring, iocb,
4352  ndlp))) {
4353  /* It matches, so dequeue and call compl
4354  with an error */
4355  list_move_tail(&iocb->list,
4356  &completions);
4357  pring->txq_cnt--;
4358  }
4359  }
4360  spin_unlock_irq(&phba->hbalock);
4361  }
4362  }
4363 
4364  /* Cancel all the IOCBs from the completions list */
4365  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4366  IOERR_SLI_ABORTED);
4367 
4368  return 0;
4369 }
4370 
4379 void
4380 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4381 {
4382  struct lpfc_vport *vport = pmb->vport;
4383  struct lpfc_nodelist *ndlp;
4384 
4385  ndlp = (struct lpfc_nodelist *)(pmb->context1);
4386  if (!ndlp)
4387  return;
4388  lpfc_issue_els_logo(vport, ndlp, 0);
4389 }
4390 
4391 /*
4392  * Free rpi associated with LPFC_NODELIST entry.
4393  * This routine is called from lpfc_freenode(), when we are removing
4394  * a LPFC_NODELIST entry. It is also called if the driver initiates a
4395  * LOGO that completes successfully, and we are waiting to PLOGI back
4396  * to the remote NPort. In addition, it is called after we receive
4397  * and unsolicated ELS cmd, send back a rsp, the rsp completes and
4398  * an unsolicited ELS cmd, send back a rsp, the rsp completes and
4399  */
4400 int
4401 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4402 {
4403  struct lpfc_hba *phba = vport->phba;
4404  LPFC_MBOXQ_t *mbox;
4405  int rc;
4406  uint16_t rpi;
4407 
4408  if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4409  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4410  if (mbox) {
4411  /* SLI4 ports require the physical rpi value. */
4412  rpi = ndlp->nlp_rpi;
4413  if (phba->sli_rev == LPFC_SLI_REV4)
4414  rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4415 
4416  lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4417  mbox->vport = vport;
4418  if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4419  mbox->context1 = ndlp;
4420  mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4421  } else {
4422  mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4423  }
4424 
4425  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4426  if (rc == MBX_NOT_FINISHED)
4427  mempool_free(mbox, phba->mbox_mem_pool);
4428  }
4429  lpfc_no_rpi(phba, ndlp);
4430 
4431  if (phba->sli_rev != LPFC_SLI_REV4)
4432  ndlp->nlp_rpi = 0;
4433  ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
4434  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4435  return 1;
4436  }
4437  return 0;
4438 }
4439 
4447 void
4448 lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
4449 {
4450  struct lpfc_vport **vports;
4451  struct lpfc_nodelist *ndlp;
4452  struct Scsi_Host *shost;
4453  int i;
4454 
4455  vports = lpfc_create_vport_work_array(phba);
4456  if (!vports) {
4457  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
4458  "2884 Vport array allocation failed\n");
4459  return;
4460  }
4461  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4462  shost = lpfc_shost_from_vport(vports[i]);
4463  spin_lock_irq(shost->host_lock);
4464  list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4465  if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4466  /* The mempool_alloc might sleep */
4467  spin_unlock_irq(shost->host_lock);
4468  lpfc_unreg_rpi(vports[i], ndlp);
4469  spin_lock_irq(shost->host_lock);
4470  }
4471  }
4472  spin_unlock_irq(shost->host_lock);
4473  }
4474  lpfc_destroy_vport_work_array(phba, vports);
4475 }
4476 
4477 void
4478 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4479 {
4480  struct lpfc_hba *phba = vport->phba;
4481  LPFC_MBOXQ_t *mbox;
4482  int rc;
4483 
4484  if (phba->sli_rev == LPFC_SLI_REV4) {
4485  lpfc_sli4_unreg_all_rpis(vport);
4486  return;
4487  }
4488 
4489  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4490  if (mbox) {
4491  lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
4492  mbox);
4493  mbox->vport = vport;
4494  mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4495  mbox->context1 = NULL;
4496  rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
4497  if (rc != MBX_TIMEOUT)
4498  mempool_free(mbox, phba->mbox_mem_pool);
4499 
4500  if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
4501  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
4502  "1836 Could not issue "
4503  "unreg_login(all_rpis) status %d\n", rc);
4504  }
4505 }
4506 
4507 void
4508 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
4509 {
4510  struct lpfc_hba *phba = vport->phba;
4511  LPFC_MBOXQ_t *mbox;
4512  int rc;
4513 
4514  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4515  if (mbox) {
4516  lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
4517  mbox);
4518  mbox->vport = vport;
4519  mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4520  mbox->context1 = NULL;
4521  rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
4522  if (rc != MBX_TIMEOUT)
4523  mempool_free(mbox, phba->mbox_mem_pool);
4524 
4525  if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
4526  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
4527  "1815 Could not issue "
4528  "unreg_did (default rpis) status %d\n",
4529  rc);
4530  }
4531 }
4532 
4533 /*
4534  * Free resources associated with LPFC_NODELIST entry
4535  * so it can be freed.
4536  */
4537 static int
4538 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4539 {
4540  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4541  struct lpfc_hba *phba = vport->phba;
4542  LPFC_MBOXQ_t *mb, *nextmb;
4543  struct lpfc_dmabuf *mp;
4544 
4545  /* Cleanup node for NPort <nlp_DID> */
4546  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4547  "0900 Cleanup node for NPort x%x "
4548  "Data: x%x x%x x%x\n",
4549  ndlp->nlp_DID, ndlp->nlp_flag,
4550  ndlp->nlp_state, ndlp->nlp_rpi);
4551  if (NLP_CHK_FREE_REQ(ndlp)) {
4552  lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4553  "0280 lpfc_cleanup_node: ndlp:x%p "
4554  "usgmap:x%x refcnt:%d\n",
4555  (void *)ndlp, ndlp->nlp_usg_map,
4556  atomic_read(&ndlp->kref.refcount));
4557  lpfc_dequeue_node(vport, ndlp);
4558  } else {
4559  lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4560  "0281 lpfc_cleanup_node: ndlp:x%p "
4561  "usgmap:x%x refcnt:%d\n",
4562  (void *)ndlp, ndlp->nlp_usg_map,
4563  atomic_read(&ndlp->kref.refcount));
4564  lpfc_disable_node(vport, ndlp);
4565  }
4566 
4567 
4568  /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
4569 
4570  /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
4571  if ((mb = phba->sli.mbox_active)) {
4572  if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4573  !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4574  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4575  mb->context2 = NULL;
4576  mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4577  }
4578  }
4579 
4580  spin_lock_irq(&phba->hbalock);
4581  /* Cleanup REG_LOGIN completions which are not yet processed */
4582  list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
4583  if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
4584  (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
4585  (ndlp != (struct lpfc_nodelist *) mb->context2))
4586  continue;
4587 
4588  mb->context2 = NULL;
4589  mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4590  }
4591 
4592  list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
4593  if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4594  !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4595  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4596  mp = (struct lpfc_dmabuf *) (mb->context1);
4597  if (mp) {
4598  __lpfc_mbuf_free(phba, mp->virt, mp->phys);
4599  kfree(mp);
4600  }
4601  list_del(&mb->list);
4602  mempool_free(mb, phba->mbox_mem_pool);
4603  /* We shall not invoke the lpfc_nlp_put to decrement
4604  * the ndlp reference count as we are in the process
4605  * of lpfc_nlp_release.
4606  */
4607  }
4608  }
4609  spin_unlock_irq(&phba->hbalock);
4610 
4611  lpfc_els_abort(phba, ndlp);
4612 
4613  spin_lock_irq(shost->host_lock);
4614  ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4615  spin_unlock_irq(shost->host_lock);
4616 
4617  ndlp->nlp_last_elscmd = 0;
4618  del_timer_sync(&ndlp->nlp_delayfunc);
4619 
4620  list_del_init(&ndlp->els_retry_evt.evt_listp);
4621  list_del_init(&ndlp->dev_loss_evt.evt_listp);
4622  lpfc_cleanup_vports_rrqs(vport, ndlp);
4623  lpfc_unreg_rpi(vport, ndlp);
4624 
4625  return 0;
4626 }
4627 
4628 /*
4629  * Check to see if we can free the nlp back to the freelist.
4630  * If we are in the middle of using the nlp in the discovery state
4631  * machine, defer the free till we reach the end of the state machine.
4632  */
4633 static void
4634 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4635 {
4636  struct lpfc_hba *phba = vport->phba;
4637  struct lpfc_rport_data *rdata;
4638  LPFC_MBOXQ_t *mbox;
4639  int rc;
4640 
4641  lpfc_cancel_retry_delay_tmo(vport, ndlp);
4642  if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4643  !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4644  !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
4645  /* For this case we need to cleanup the default rpi
4646  * allocated by the firmware.
4647  */
4648  if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
4649  != NULL) {
4650  rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
4651  (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
4652  if (rc) {
4653  mempool_free(mbox, phba->mbox_mem_pool);
4654  }
4655  else {
4656  mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4657  mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4658  mbox->vport = vport;
4659  mbox->context2 = ndlp;
4660  rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4661  if (rc == MBX_NOT_FINISHED) {
4662  mempool_free(mbox, phba->mbox_mem_pool);
4663  }
4664  }
4665  }
4666  }
4667  lpfc_cleanup_node(vport, ndlp);
4668 
4669  /*
4670  * We can get here with a non-NULL ndlp->rport because when we
4671  * unregister a rport we don't break the rport/node linkage. So when we
4672  * do, make sure we don't leave any dangling pointers behind.
4673  */
4674  if (ndlp->rport) {
4675  rdata = ndlp->rport->dd_data;
4676  rdata->pnode = NULL;
4677  ndlp->rport = NULL;
4678  }
4679 }
4680 
4681 static int
4682 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4683  uint32_t did)
4684 {
4685  D_ID mydid, ndlpdid, matchdid;
4686 
4687  if (did == Bcast_DID)
4688  return 0;
4689 
4690  /* First check for Direct match */
4691  if (ndlp->nlp_DID == did)
4692  return 1;
4693 
4694  /* Next check for a match where area and domain are zero */
4695  mydid.un.word = vport->fc_myDID;
4696  if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
4697  return 0;
4698  }
4699 
4700  matchdid.un.word = did;
4701  ndlpdid.un.word = ndlp->nlp_DID;
4702  if (matchdid.un.b.id == ndlpdid.un.b.id) {
4703  if ((mydid.un.b.domain == matchdid.un.b.domain) &&
4704  (mydid.un.b.area == matchdid.un.b.area)) {
4705  if ((ndlpdid.un.b.domain == 0) &&
4706  (ndlpdid.un.b.area == 0)) {
4707  if (ndlpdid.un.b.id)
4708  return 1;
4709  }
4710  return 0;
4711  }
4712 
4713  matchdid.un.word = ndlp->nlp_DID;
4714  if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
4715  (mydid.un.b.area == ndlpdid.un.b.area)) {
4716  if ((matchdid.un.b.domain == 0) &&
4717  (matchdid.un.b.area == 0)) {
4718  if (matchdid.un.b.id)
4719  return 1;
4720  }
4721  }
4722  }
4723  return 0;
4724 }
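/* Illustrative example of the indirect match above. A DID is
 * domain.area.id, one byte each, and a private-loop peer may be known
 * only by its ALPA (the id byte): with fc_myDID = 0x010203, a stored
 * nlp_DID of 0x000003 matches a searched did of 0x010203 because the
 * id bytes agree, the searched did shares the local domain/area, and
 * the stored DID carries zero domain and area.
 */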
4725 
4726 /* Search for a nodelist entry */
4727 static struct lpfc_nodelist *
4728 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
4729 {
4730  struct lpfc_nodelist *ndlp;
4731  uint32_t data1;
4732 
4733  list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4734  if (lpfc_matchdid(vport, ndlp, did)) {
4735  data1 = (((uint32_t) ndlp->nlp_state << 24) |
4736  ((uint32_t) ndlp->nlp_xri << 16) |
4737  ((uint32_t) ndlp->nlp_type << 8) |
4738  ((uint32_t) ndlp->nlp_rpi & 0xff));
4739  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4740  "0929 FIND node DID "
4741  "Data: x%p x%x x%x x%x\n",
4742  ndlp, ndlp->nlp_DID,
4743  ndlp->nlp_flag, data1);
4744  return ndlp;
4745  }
4746  }
4747 
4748  /* FIND node did <did> NOT FOUND */
4749  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4750  "0932 FIND node did x%x NOT FOUND.\n", did);
4751  return NULL;
4752 }
4753 
4754 struct lpfc_nodelist *
4755 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
4756 {
4757  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4758  struct lpfc_nodelist *ndlp;
4759  unsigned long iflags;
4760 
4761  spin_lock_irqsave(shost->host_lock, iflags);
4762  ndlp = __lpfc_findnode_did(vport, did);
4763  spin_unlock_irqrestore(shost->host_lock, iflags);
4764  return ndlp;
4765 }
4766 
4767 struct lpfc_nodelist *
4768 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
4769 {
4770  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4771  struct lpfc_nodelist *ndlp;
4772 
4773  ndlp = lpfc_findnode_did(vport, did);
4774  if (!ndlp) {
4775  if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
4776  lpfc_rscn_payload_check(vport, did) == 0)
4777  return NULL;
4778  ndlp = (struct lpfc_nodelist *)
4779  mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
4780  if (!ndlp)
4781  return NULL;
4782  lpfc_nlp_init(vport, ndlp, did);
4783  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4784  spin_lock_irq(shost->host_lock);
4785  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4786  spin_unlock_irq(shost->host_lock);
4787  return ndlp;
4788  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4789  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
4790  if (!ndlp)
4791  return NULL;
4792  spin_lock_irq(shost->host_lock);
4793  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4794  spin_unlock_irq(shost->host_lock);
4795  return ndlp;
4796  }
4797 
4798  if ((vport->fc_flag & FC_RSCN_MODE) &&
4799  !(vport->fc_flag & FC_NDISC_ACTIVE)) {
4800  if (lpfc_rscn_payload_check(vport, did)) {
4801  /* If we've already received a PLOGI from this NPort
4802  * we don't need to try to discover it again.
4803  */
4804  if (ndlp->nlp_flag & NLP_RCV_PLOGI)
4805  return NULL;
4806 
4807  /* Since this node is marked for discovery,
4808  * delay timeout is not needed.
4809  */
4810  lpfc_cancel_retry_delay_tmo(vport, ndlp);
4811  spin_lock_irq(shost->host_lock);
4812  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4813  spin_unlock_irq(shost->host_lock);
4814  } else
4815  ndlp = NULL;
4816  } else {
4817  /* If we've already received a PLOGI from this NPort,
4818  * or we are already in the process of discovery on it,
4819  * we don't need to try to discover it again.
4820  */
4821  if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
4822  ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
4823  ndlp->nlp_flag & NLP_RCV_PLOGI)
4824  return NULL;
4825  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4826  spin_lock_irq(shost->host_lock);
4827  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4828  spin_unlock_irq(shost->host_lock);
4829  }
4830  return ndlp;
4831 }
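/* Summary of the cases handled by lpfc_setup_disc_node() above:
 *  - unknown DID:    allocate and init an ndlp in NPR state and mark it
 *                    NLP_NPR_2B_DISC (unless in RSCN mode with the DID
 *                    absent from the RSCN payload, which returns NULL);
 *  - inactive ndlp:  re-enable it into NPR state and mark it 2B_DISC;
 *  - RSCN mode:      skip nodes not in the payload or that already sent
 *                    a PLOGI, else cancel the retry delay and mark it;
 *  - normal mode:    skip nodes already in ADISC/PLOGI_ISSUE or that
 *                    sent a PLOGI, else back to NPR state and mark it.
 */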
4832 
4833 /* Build a list of nodes to discover based on the loopmap */
4834 void
4835 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
4836 {
4837  struct lpfc_hba *phba = vport->phba;
4838  int j;
4839  uint32_t alpa, index;
4840 
4841  if (!lpfc_is_link_up(phba))
4842  return;
4843 
4844  if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
4845  return;
4846 
4847  /* Check for loop map present or not */
4848  if (phba->alpa_map[0]) {
4849  for (j = 1; j <= phba->alpa_map[0]; j++) {
4850  alpa = phba->alpa_map[j];
4851  if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
4852  continue;
4853  lpfc_setup_disc_node(vport, alpa);
4854  }
4855  } else {
4856  /* No ALPA map, so try all ALPAs */
4857  for (j = 0; j < FC_MAXLOOP; j++) {
4858  /* If cfg_scan_down is set, start from highest
4859  * ALPA (0xef) to lowest (0x1).
4860  */
4861  if (vport->cfg_scan_down)
4862  index = j;
4863  else
4864  index = FC_MAXLOOP - j - 1;
4865  alpa = lpfcAlpaArray[index];
4866  if ((vport->fc_myDID & 0xff) == alpa)
4867  continue;
4868  lpfc_setup_disc_node(vport, alpa);
4869  }
4870  }
4871  return;
4872 }
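/* Small example of the fallback scan order above: with no ALPA map and
 * cfg_scan_down set, the loop indexes lpfcAlpaArray from 0 upward, so
 * ALPA 0xEF is tried first down to 0x01; with scan-down clear it starts
 * at index FC_MAXLOOP - 1 (ALPA 0x01) and works back toward 0xEF.
 * Either way the local port's own ALPA (fc_myDID & 0xff) is skipped.
 */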
4873 
4874 void
4875 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
4876 {
4877  LPFC_MBOXQ_t *mbox;
4878  struct lpfc_sli *psli = &phba->sli;
4879  struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
4880  struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
4881  struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
4882  int rc;
4883 
4884  /*
4885  * if it's not a physical port or if we already sent
4886  * clear_la then don't send it.
4887  */
4888  if ((phba->link_state >= LPFC_CLEAR_LA) ||
4889  (vport->port_type != LPFC_PHYSICAL_PORT) ||
4890  (phba->sli_rev == LPFC_SLI_REV4))
4891  return;
4892 
4893  /* Link up discovery */
4894  if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
4895  phba->link_state = LPFC_CLEAR_LA;
4896  lpfc_clear_la(phba, mbox);
4897  mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
4898  mbox->vport = vport;
4899  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4900  if (rc == MBX_NOT_FINISHED) {
4901  mempool_free(mbox, phba->mbox_mem_pool);
4902  lpfc_disc_flush_list(vport);
4903  extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
4904  fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
4905  next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
4906  phba->link_state = LPFC_HBA_ERROR;
4907  }
4908  }
4909 }
4910 
4911 /* Reg_vpi to tell firmware to resume normal operations */
4912 void
4913 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4914 {
4915  LPFC_MBOXQ_t *regvpimbox;
4916 
4917  regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4918  if (regvpimbox) {
4919  lpfc_reg_vpi(vport, regvpimbox);
4920  regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
4921  regvpimbox->vport = vport;
4922  if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
4923  == MBX_NOT_FINISHED) {
4924  mempool_free(regvpimbox, phba->mbox_mem_pool);
4925  }
4926  }
4927 }
4928 
4929 /* Start Link up / RSCN discovery on NPR nodes */
4930 void
4931 lpfc_disc_start(struct lpfc_vport *vport)
4932 {
4933  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4934  struct lpfc_hba *phba = vport->phba;
4935  uint32_t num_sent;
4936  uint32_t clear_la_pending;
4937  int did_changed;
4938 
4939  if (!lpfc_is_link_up(phba))
4940  return;
4941 
4942  if (phba->link_state == LPFC_CLEAR_LA)
4943  clear_la_pending = 1;
4944  else
4945  clear_la_pending = 0;
4946 
4947  if (vport->port_state < LPFC_VPORT_READY)
4948  vport->port_state = LPFC_DISC_AUTH;
4949 
4950  lpfc_set_disctmo(vport);
4951 
4952  if (vport->fc_prevDID == vport->fc_myDID)
4953  did_changed = 0;
4954  else
4955  did_changed = 1;
4956 
4957  vport->fc_prevDID = vport->fc_myDID;
4958  vport->num_disc_nodes = 0;
4959 
4960  /* Start Discovery state <hba_state> */
4961  lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4962  "0202 Start Discovery hba state x%x "
4963  "Data: x%x x%x x%x\n",
4964  vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
4965  vport->fc_adisc_cnt);
4966 
4967  /* First do ADISCs - if any */
4968  num_sent = lpfc_els_disc_adisc(vport);
4969 
4970  if (num_sent)
4971  return;
4972 
4973  /* Register the VPI for SLI3, NON-NPIV only. */
4974  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4975  !(vport->fc_flag & FC_PT2PT) &&
4976  !(vport->fc_flag & FC_RSCN_MODE) &&
4977  (phba->sli_rev < LPFC_SLI_REV4)) {
4978  lpfc_issue_reg_vpi(phba, vport);
4979  return;
4980  }
4981 
4982  /*
4983  * For SLI2, we need to set port_state to READY and continue
4984  * discovery.
4985  */
4986  if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
4987  /* If we get here, there is nothing to ADISC */
4988  if (vport->port_type == LPFC_PHYSICAL_PORT)
4989  lpfc_issue_clear_la(phba, vport);
4990 
4991  if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
4992  vport->num_disc_nodes = 0;
4993  /* go thru NPR nodes and issue ELS PLOGIs */
4994  if (vport->fc_npr_cnt)
4995  lpfc_els_disc_plogi(vport);
4996 
4997  if (!vport->num_disc_nodes) {
4998  spin_lock_irq(shost->host_lock);
4999  vport->fc_flag &= ~FC_NDISC_ACTIVE;
5000  spin_unlock_irq(shost->host_lock);
5001  lpfc_can_disctmo(vport);
5002  }
5003  }
5004  vport->port_state = LPFC_VPORT_READY;
5005  } else {
5006  /* Next do PLOGIs - if any */
5007  num_sent = lpfc_els_disc_plogi(vport);
5008 
5009  if (num_sent)
5010  return;
5011 
5012  if (vport->fc_flag & FC_RSCN_MODE) {
5013  /* Check to see if more RSCNs came in while we
5014  * were processing this one.
5015  */
5016  if ((vport->fc_rscn_id_cnt == 0) &&
5017  (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5018  spin_lock_irq(shost->host_lock);
5019  vport->fc_flag &= ~FC_RSCN_MODE;
5020  spin_unlock_irq(shost->host_lock);
5021  lpfc_can_disctmo(vport);
5022  } else
5023  lpfc_els_handle_rscn(vport);
5024  }
5025  }
5026  return;
5027 }
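/* The ordering above is the core of lpfc link-up/RSCN discovery:
 * ADISC the known NPR nodes first; when none remain, REG_VPI (SLI3
 * non-NPIV) or CLEAR_LA plus PLOGIs to the remaining NPR nodes; and
 * only when no discovery nodes are left, leave RSCN mode or hand any
 * RSCNs that arrived meanwhile to lpfc_els_handle_rscn().
 */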
5028 
5029 /*
5030  * Ignore completion for all IOCBs on the tx and txcmpl queues for the
5031  * ELS ring that match the specified nodelist.
5032  */
5033 static void
5034 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5035 {
5036  LIST_HEAD(completions);
5037  struct lpfc_sli *psli;
5038  IOCB_t *icmd;
5039  struct lpfc_iocbq *iocb, *next_iocb;
5040  struct lpfc_sli_ring *pring;
5041 
5042  psli = &phba->sli;
5043  pring = &psli->ring[LPFC_ELS_RING];
5044 
5045  /* Error out matching iocbs on the txq or txcmplq.
5046  * First check the txq.
5047  */
5048  spin_lock_irq(&phba->hbalock);
5049  list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5050  if (iocb->context1 != ndlp) {
5051  continue;
5052  }
5053  icmd = &iocb->iocb;
5054  if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5055  (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5056 
5057  list_move_tail(&iocb->list, &completions);
5058  pring->txq_cnt--;
5059  }
5060  }
5061 
5062  /* Next check the txcmplq */
5063  list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5064  if (iocb->context1 != ndlp) {
5065  continue;
5066  }
5067  icmd = &iocb->iocb;
5068  if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5069  icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5070  lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5071  }
5072  }
5073  spin_unlock_irq(&phba->hbalock);
5074 
5075  /* Cancel all the IOCBs from the completions list */
5076  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5077  IOERR_SLI_ABORTED);
5078 }
5079 
5080 static void
5081 lpfc_disc_flush_list(struct lpfc_vport *vport)
5082 {
5083  struct lpfc_nodelist *ndlp, *next_ndlp;
5084  struct lpfc_hba *phba = vport->phba;
5085 
5086  if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5087  list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5088  nlp_listp) {
5089  if (!NLP_CHK_NODE_ACT(ndlp))
5090  continue;
5091  if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5092  ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5093  lpfc_free_tx(phba, ndlp);
5094  }
5095  }
5096  }
5097 }
5098 
5099 void
5100 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5101 {
5102  lpfc_els_flush_rscn(vport);
5103  lpfc_els_flush_cmd(vport);
5104  lpfc_disc_flush_list(vport);
5105 }
5106 
5107 /*****************************************************************************/
5108 /*
5109  * NAME: lpfc_disc_timeout
5110  *
5111  * FUNCTION: Fibre Channel driver discovery timeout routine.
5112  *
5113  * EXECUTION ENVIRONMENT: interrupt only
5114  *
5115  * CALLED FROM:
5116  * Timer function
5117  *
5118  * RETURNS:
5119  * none
5120  */
5121 /*****************************************************************************/
5122 void
5123 lpfc_disc_timeout(unsigned long ptr)
5124 {
5125  struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5126  struct lpfc_hba *phba = vport->phba;
5127  uint32_t tmo_posted;
5128  unsigned long flags = 0;
5129 
5130  if (unlikely(!phba))
5131  return;
5132 
5133  spin_lock_irqsave(&vport->work_port_lock, flags);
5134  tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5135  if (!tmo_posted)
5136  vport->work_port_events |= WORKER_DISC_TMO;
5137  spin_unlock_irqrestore(&vport->work_port_lock, flags);
5138 
5139  if (!tmo_posted)
5140  lpfc_worker_wake_up(phba);
5141  return;
5142 }
5143 
5144 static void
5145 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5146 {
5147  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5148  struct lpfc_hba *phba = vport->phba;
5149  struct lpfc_sli *psli = &phba->sli;
5150  struct lpfc_nodelist *ndlp, *next_ndlp;
5151  LPFC_MBOXQ_t *initlinkmbox;
5152  int rc, clrlaerr = 0;
5153 
5154  if (!(vport->fc_flag & FC_DISC_TMO))
5155  return;
5156 
5157  spin_lock_irq(shost->host_lock);
5158  vport->fc_flag &= ~FC_DISC_TMO;
5159  spin_unlock_irq(shost->host_lock);
5160 
5161  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5162  "disc timeout: state:x%x rtry:x%x flg:x%x",
5163  vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5164 
5165  switch (vport->port_state) {
5166 
5167  case LPFC_LOCAL_CFG_LINK:
5168  /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
5169  * FAN
5170  */
5171  /* FAN timeout */
5172  lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5173  "0221 FAN timeout\n");
5174  /* Start discovery by sending FLOGI, clean up old rpis */
5175  list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5176  nlp_listp) {
5177  if (!NLP_CHK_NODE_ACT(ndlp))
5178  continue;
5179  if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5180  continue;
5181  if (ndlp->nlp_type & NLP_FABRIC) {
5182  /* Clean up the ndlp on Fabric connections */
5183  lpfc_drop_node(vport, ndlp);
5184 
5185  } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5186  /* Fail outstanding IO now since device
5187  * is marked for PLOGI.
5188  */
5189  lpfc_unreg_rpi(vport, ndlp);
5190  }
5191  }
5192  if (vport->port_state != LPFC_FLOGI) {
5193  if (phba->sli_rev <= LPFC_SLI_REV3)
5194  lpfc_initial_flogi(vport);
5195  else
5196  lpfc_issue_init_vfi(vport);
5197  return;
5198  }
5199  break;
5200 
5201  case LPFC_FDISC:
5202  case LPFC_FLOGI:
5203  /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5204  /* Initial FLOGI timeout */
5205  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5206  "0222 Initial %s timeout\n",
5207  vport->vpi ? "FDISC" : "FLOGI");
5208 
5209  /* Assume no Fabric and go on with discovery.
5210  * Check for outstanding ELS FLOGI to abort.
5211  */
5212 
5213  /* FLOGI failed, so just use loop map to make discovery list */
5214  lpfc_disc_list_loopmap(vport);
5215 
5216  /* Start discovery */
5217  lpfc_disc_start(vport);
5218  break;
5219 
5220  case LPFC_FABRIC_CFG_LINK:
5221  /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5222  NameServer login */
5223  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5224  "0223 Timeout while waiting for "
5225  "NameServer login\n");
5226  /* Next look for NameServer ndlp */
5227  ndlp = lpfc_findnode_did(vport, NameServer_DID);
5228  if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5229  lpfc_els_abort(phba, ndlp);
5230 
5231  /* ReStart discovery */
5232  goto restart_disc;
5233 
5234  case LPFC_NS_QRY:
5235  /* Check for wait for NameServer Rsp timeout */
5236  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5237  "0224 NameServer Query timeout "
5238  "Data: x%x x%x\n",
5239  vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5240 
5241  if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5242  /* Try it one more time */
5243  vport->fc_ns_retry++;
5244  rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
5245  vport->fc_ns_retry, 0);
5246  if (rc == 0)
5247  break;
5248  }
5249  vport->fc_ns_retry = 0;
5250 
5251 restart_disc:
5252  /*
5253  * Discovery is over.
5254  * set port_state to PORT_READY if SLI2.
5255  * cmpl_reg_vpi will set port_state to READY for SLI3.
5256  */
5257  if (phba->sli_rev < LPFC_SLI_REV4) {
5258  if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5259  lpfc_issue_reg_vpi(phba, vport);
5260  else {
5261  lpfc_issue_clear_la(phba, vport);
5262  vport->port_state = LPFC_VPORT_READY;
5263  }
5264  }
5265 
5266  /* Setup and issue mailbox INITIALIZE LINK command */
5267  initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5268  if (!initlinkmbox) {
5269  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5270  "0206 Device Discovery "
5271  "completion error\n");
5272  phba->link_state = LPFC_HBA_ERROR;
5273  break;
5274  }
5275 
5276  lpfc_linkdown(phba);
5277  lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5278  phba->cfg_link_speed);
5279  initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5280  initlinkmbox->vport = vport;
5281  initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5282  rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5283  lpfc_set_loopback_flag(phba);
5284  if (rc == MBX_NOT_FINISHED)
5285  mempool_free(initlinkmbox, phba->mbox_mem_pool);
5286 
5287  break;
5288 
5289  case LPFC_DISC_AUTH:
5290  /* Node Authentication timeout */
5291  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5292  "0227 Node Authentication timeout\n");
5293  lpfc_disc_flush_list(vport);
5294 
5295  /*
5296  * set port_state to PORT_READY if SLI2.
5297  * cmpl_reg_vpi will set port_state to READY for SLI3.
5298  */
5299  if (phba->sli_rev < LPFC_SLI_REV4) {
5300  if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5301  lpfc_issue_reg_vpi(phba, vport);
5302  else { /* NPIV Not enabled */
5303  lpfc_issue_clear_la(phba, vport);
5304  vport->port_state = LPFC_VPORT_READY;
5305  }
5306  }
5307  break;
5308 
5309  case LPFC_VPORT_READY:
5310  if (vport->fc_flag & FC_RSCN_MODE) {
5311  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5312  "0231 RSCN timeout Data: x%x "
5313  "x%x\n",
5314  vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5315 
5316  /* Cleanup any outstanding ELS commands */
5317  lpfc_els_flush_cmd(vport);
5318 
5319  lpfc_els_flush_rscn(vport);
5320  lpfc_disc_flush_list(vport);
5321  }
5322  break;
5323 
5324  default:
5325  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5326  "0273 Unexpected discovery timeout, "
5327  "vport State x%x\n", vport->port_state);
5328  break;
5329  }
5330 
5331  switch (phba->link_state) {
5332  case LPFC_CLEAR_LA:
5333  /* CLEAR LA timeout */
5334  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5335  "0228 CLEAR LA timeout\n");
5336  clrlaerr = 1;
5337  break;
5338 
5339  case LPFC_LINK_UP:
5340  lpfc_issue_clear_la(phba, vport);
5341  /* Drop thru */
5342  case LPFC_LINK_UNKNOWN:
5343  case LPFC_WARM_START:
5344  case LPFC_INIT_START:
5345  case LPFC_INIT_MBX_CMDS:
5346  case LPFC_LINK_DOWN:
5347  case LPFC_HBA_ERROR:
5348  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5349  "0230 Unexpected timeout, hba link "
5350  "state x%x\n", phba->link_state);
5351  clrlaerr = 1;
5352  break;
5353 
5354  case LPFC_HBA_READY:
5355  break;
5356  }
5357 
5358  if (clrlaerr) {
5359  lpfc_disc_flush_list(vport);
5360  psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5361  psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5362  psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5363  vport->port_state = LPFC_VPORT_READY;
5364  }
5365 
5366  return;
5367 }
5368 
5369 /*
5370  * This routine handles processing a NameServer REG_LOGIN mailbox
5371  * command upon completion. It is setup in the LPFC_MBOXQ
5372  * as the completion routine when the command is
5373  * handed off to the SLI layer.
5374  */
5375 void
5376 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5377 {
5378  MAILBOX_t *mb = &pmb->u.mb;
5379  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
5380  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5381  struct lpfc_vport *vport = pmb->vport;
5382 
5383  pmb->context1 = NULL;
5384  pmb->context2 = NULL;
5385 
5386  if (phba->sli_rev < LPFC_SLI_REV4)
5387  ndlp->nlp_rpi = mb->un.varWords[0];
5388  ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5389  ndlp->nlp_type |= NLP_FABRIC;
5390  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5391 
5392  /*
5393  * Start issuing Fabric-Device Management Interface (FDMI) command to
5394  * 0xfffffa (FDMI well known port) or delay issuing the FDMI command if
5395  * fdmi-on=2 (supporting RPA/hostname)
5396  */
5397 
5398  if (vport->cfg_fdmi_on == 1)
5399  lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
5400  else
5401  mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
5402 
5403  /* decrement the node reference count held for this callback
5404  * function.
5405  */
5406  lpfc_nlp_put(ndlp);
5407  lpfc_mbuf_free(phba, mp->virt, mp->phys);
5408  kfree(mp);
5409  mempool_free(pmb, phba->mbox_mem_pool);
5410 
5411  return;
5412 }
5413 
5414 static int
5415 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5416 {
5417  uint16_t *rpi = param;
5418 
5419  /* check for active node */
5420  if (!NLP_CHK_NODE_ACT(ndlp))
5421  return 0;
5422 
5423  return ndlp->nlp_rpi == *rpi;
5424 }
5425 
5426 static int
5427 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
5428 {
5429  return memcmp(&ndlp->nlp_portname, param,
5430  sizeof(ndlp->nlp_portname)) == 0;
5431 }
5432 
5433 static struct lpfc_nodelist *
5434 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5435 {
5436  struct lpfc_nodelist *ndlp;
5437 
5438  list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5439  if (filter(ndlp, param)) {
5440  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5441  "3185 FIND node filter %p DID "
5442  "Data: x%p x%x x%x\n",
5443  filter, ndlp, ndlp->nlp_DID,
5444  ndlp->nlp_flag);
5445  return ndlp;
5446  }
5447  }
5448  lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5449  "3186 FIND node filter %p NOT FOUND.\n", filter);
5450  return NULL;
5451 }
5452 
5453 /*
5454  * This routine looks up the ndlp lists for the given RPI. If rpi found it
5455  * returns the node list element pointer else return NULL.
5456  */
5457 struct lpfc_nodelist *
5458 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5459 {
5460  return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
5461 }
5462 
5463 /*
5464  * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
5465  * returns the node element list pointer else return NULL.
5466  */
5467 struct lpfc_nodelist *
5468 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
5469 {
5470  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5471  struct lpfc_nodelist *ndlp;
5472 
5473  spin_lock_irq(shost->host_lock);
5474  ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
5475  spin_unlock_irq(shost->host_lock);
5476  return ndlp;
5477 }
5478 
5479 /*
5480  * This routine looks up the ndlp lists for the given RPI. If the rpi
5481  * is found, the routine returns the node element list pointer else
5482  * return NULL.
5483  */
5484 struct lpfc_nodelist *
5485 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5486 {
5487  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5488  struct lpfc_nodelist *ndlp;
5489 
5490  spin_lock_irq(shost->host_lock);
5491  ndlp = __lpfc_findnode_rpi(vport, rpi);
5492  spin_unlock_irq(shost->host_lock);
5493  return ndlp;
5494 }
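/* Hypothetical caller sketch for the locked/unlocked pair above. A
 * caller that already holds shost->host_lock must use the
 * __lpfc_findnode_rpi() form to avoid self-deadlock:
 *
 *	spin_lock_irq(shost->host_lock);
 *	ndlp = __lpfc_findnode_rpi(vport, rpi);
 *	spin_unlock_irq(shost->host_lock);
 *
 * while lpfc_findnode_rpi() takes and releases the lock itself.
 */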
5495 
5509 struct lpfc_vport *
5510 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5511 {
5512  struct lpfc_vport *vport;
5513  unsigned long flags;
5514  int i = 0;
5515 
5516  /* The physical ports are always vpi 0 - translate is unnecessary. */
5517  if (vpi > 0) {
5518  /*
5519  * Translate the physical vpi to the logical vpi. The
5520  * vport stores the logical vpi.
5521  */
5522  for (i = 0; i < phba->max_vpi; i++) {
5523  if (vpi == phba->vpi_ids[i])
5524  break;
5525  }
5526 
5527  if (i >= phba->max_vpi) {
5528  lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
5529  "2936 Could not find Vport mapped "
5530  "to vpi %d\n", vpi);
5531  return NULL;
5532  }
5533  }
5534 
5535  spin_lock_irqsave(&phba->hbalock, flags);
5536  list_for_each_entry(vport, &phba->port_list, listentry) {
5537  if (vport->vpi == i) {
5538  spin_unlock_irqrestore(&phba->hbalock, flags);
5539  return vport;
5540  }
5541  }
5542  spin_unlock_irqrestore(&phba->hbalock, flags);
5543  return NULL;
5544 }
5545 
5546 void
5547 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5548  uint32_t did)
5549 {
5550  memset(ndlp, 0, sizeof (struct lpfc_nodelist));
5551 
5552  lpfc_initialize_node(vport, ndlp, did);
5553  INIT_LIST_HEAD(&ndlp->nlp_listp);
5554 
5555  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
5556  "node init: did:x%x",
5557  ndlp->nlp_DID, 0, 0);
5558 
5559  return;
5560 }
5561 
5562 /* This routine releases all resources associated with a specific NPort's ndlp
5563  * and mempool_free's the nodelist.
5564  */
5565 static void
5566 lpfc_nlp_release(struct kref *kref)
5567 {
5568  struct lpfc_hba *phba;
5569  unsigned long flags;
5570  struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
5571  kref);
5572 
5573  lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5574  "node release: did:x%x flg:x%x type:x%x",
5575  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
5576 
5577  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5578  "0279 lpfc_nlp_release: ndlp:x%p did %x "
5579  "usgmap:x%x refcnt:%d\n",
5580  (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
5581  atomic_read(&ndlp->kref.refcount));
5582 
5583  /* remove ndlp from action. */
5584  lpfc_nlp_remove(ndlp->vport, ndlp);
5585 
5586  /* clear the ndlp active flag for all release cases */
5587  phba = ndlp->phba;
5588  spin_lock_irqsave(&phba->ndlp_lock, flags);
5589  NLP_CLR_NODE_ACT(ndlp);
5590  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5591  if (phba->sli_rev == LPFC_SLI_REV4)
5592  lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5593 
5594  /* free ndlp memory for final ndlp release */
5595  if (NLP_CHK_FREE_REQ(ndlp)) {
5596  kfree(ndlp->lat_data);
5597  mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
5598  }
5599 }
5600 
5601 /* This routine bumps the reference count for an ndlp structure to ensure
5602  * that one discovery thread won't free a ndlp while another discovery thread
5603  * is using it.
5604  */
5605 struct lpfc_nodelist *
5606 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
5607 {
5608  struct lpfc_hba *phba;
5609  unsigned long flags;
5610 
5611  if (ndlp) {
5612  lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5613  "node get: did:x%x flg:x%x refcnt:x%x",
5614  ndlp->nlp_DID, ndlp->nlp_flag,
5615  atomic_read(&ndlp->kref.refcount));
5616  /* Check the ndlp usage map to avoid incrementing the
5617  * reference count of an ndlp that is in the process of
5618  * being released.
5619  */
5620  phba = ndlp->phba;
5621  spin_lock_irqsave(&phba->ndlp_lock, flags);
5622  if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
5623  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5624  lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5625  "0276 lpfc_nlp_get: ndlp:x%p "
5626  "usgmap:x%x refcnt:%d\n",
5627  (void *)ndlp, ndlp->nlp_usg_map,
5628  atomic_read(&ndlp->kref.refcount));
5629  return NULL;
5630  } else
5631  kref_get(&ndlp->kref);
5632  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5633  }
5634  return ndlp;
5635 }
5636 
5637 /* This routine decrements the reference count for an ndlp structure. If the
5638  * count goes to 0, this indicates that the associated nodelist should be
5639  * freed. Returning 1 indicates the ndlp resource has been released; on the
5640  * other hand, returning 0 indicates the ndlp resource has not been released
5641  * yet.
5642  */
5643 int
5644 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
5645 {
5646  struct lpfc_hba *phba;
5647  unsigned long flags;
5648 
5649  if (!ndlp)
5650  return 1;
5651 
5652  lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5653  "node put: did:x%x flg:x%x refcnt:x%x",
5654  ndlp->nlp_DID, ndlp->nlp_flag,
5655  atomic_read(&ndlp->kref.refcount));
5656  phba = ndlp->phba;
5657  spin_lock_irqsave(&phba->ndlp_lock, flags);
5658  /* Check the ndlp memory free acknowledge flag to avoid the
5659  * possible race where kref_put gets invoked again after a
5660  * previous one has already freed the ndlp memory.
5661  */
5662  if (NLP_CHK_FREE_ACK(ndlp)) {
5663  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5664  lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5665  "0274 lpfc_nlp_put: ndlp:x%p "
5666  "usgmap:x%x refcnt:%d\n",
5667  (void *)ndlp, ndlp->nlp_usg_map,
5668  atomic_read(&ndlp->kref.refcount));
5669  return 1;
5670  }
5671  /* Check the ndlp inactivate log flag to avoid the possible
5672  * race where kref_put gets invoked again while the ndlp is
5673  * already in the inactivating state.
5674  */
5675  if (NLP_CHK_IACT_REQ(ndlp)) {
5676  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5677  lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5678  "0275 lpfc_nlp_put: ndlp:x%p "
5679  "usgmap:x%x refcnt:%d\n",
5680  (void *)ndlp, ndlp->nlp_usg_map,
5681  atomic_read(&ndlp->kref.refcount));
5682  return 1;
5683  }
5684  /* For the last put, mark the ndlp usage flags to make sure
5685  * that no other kref_get or kref_put on the same ndlp can
5686  * slip in while the final kref_put is being invoked on
5687  * this ndlp.
5688  */
5689  if (atomic_read(&ndlp->kref.refcount) == 1) {
5690  /* Indicate ndlp is put to inactive state. */
5691  NLP_SET_IACT_REQ(ndlp);
5692  /* Acknowledge ndlp memory free has been seen. */
5693  if (NLP_CHK_FREE_REQ(ndlp))
5694  NLP_SET_FREE_ACK(ndlp);
5695  }
5696  spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5697  /* Note: kref_put returns 1 when decrementing a reference
5698  * count that was 1; it invokes the release callback function
5699  * but leaves the reference count at 1 (it does not actually
5700  * perform the final decrement). Otherwise, it decrements the
5701  * reference count and returns 0.
5702  */
5703  return kref_put(&ndlp->kref, lpfc_nlp_release);
5704 }
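/* Minimal sketch of the intended get/put pairing (call sites
 * hypothetical): a reference is taken for each asynchronous context
 * that stores the ndlp pointer and dropped in that context's
 * completion path:
 *
 *	mbox->context2 = lpfc_nlp_get(ndlp);	hold across the mailbox
 *	...
 *	lpfc_nlp_put(ndlp);			drop in the cmpl handler
 *
 * The usage-map flags (IACT_REQ / FREE_ACK) close the window in which
 * a final kref_put could race a concurrent lpfc_nlp_get().
 */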
5705 
5706 /* This routine frees the specified nodelist if it is not in use
5707  * by any other discovery thread. This routine returns 1 if the
5708  * ndlp has been freed. A return value of 0 indicates the ndlp has
5709  * not yet been released.
5710  */
5711 int
5712 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
5713 {
5714  lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5715  "node not used: did:x%x flg:x%x refcnt:x%x",
5716  ndlp->nlp_DID, ndlp->nlp_flag,
5717  atomic_read(&ndlp->kref.refcount));
5718  if (atomic_read(&ndlp->kref.refcount) == 1)
5719  if (lpfc_nlp_put(ndlp))
5720  return 1;
5721  return 0;
5722 }
5723 
5734 static int
5735 lpfc_fcf_inuse(struct lpfc_hba *phba)
5736 {
5737  struct lpfc_vport **vports;
5738  int i, ret = 0;
5739  struct lpfc_nodelist *ndlp;
5740  struct Scsi_Host *shost;
5741 
5742  vports = lpfc_create_vport_work_array(phba);
5743 
5744  /* If driver cannot allocate memory, indicate fcf is in use */
5745  if (!vports)
5746  return 1;
5747 
5748  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5749  shost = lpfc_shost_from_vport(vports[i]);
5750  spin_lock_irq(shost->host_lock);
5751  /*
5752  * If the CVL_RCVD bit is not set then we have sent the
5753  * flogi.
5754  * If dev_loss fires while we are waiting we do not want to
5755  * unreg the fcf.
5756  */
5757  if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
5758  spin_unlock_irq(shost->host_lock);
5759  ret = 1;
5760  goto out;
5761  }
5762  list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5763  if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
5764  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
5765  ret = 1;
5766  spin_unlock_irq(shost->host_lock);
5767  goto out;
5768  } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5769  ret = 1;
5770  lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
5771  "2624 RPI %x DID %x flag %x "
5772  "still logged in\n",
5773  ndlp->nlp_rpi, ndlp->nlp_DID,
5774  ndlp->nlp_flag);
5775  }
5776  }
5777  spin_unlock_irq(shost->host_lock);
5778  }
5779 out:
5780  lpfc_destroy_vport_work_array(phba, vports);
5781  return ret;
5782 }
5783 
5791 void
5792 lpfc_mbx_cmpl_unreg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5793 {
5794  struct lpfc_vport *vport = mboxq->vport;
5795  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5796 
5797  if (mboxq->u.mb.mbxStatus) {
5798  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5799  "2555 UNREG_VFI mbxStatus error x%x "
5800  "HBA state x%x\n",
5801  mboxq->u.mb.mbxStatus, vport->port_state);
5802  }
5803  spin_lock_irq(shost->host_lock);
5804  phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
5805  spin_unlock_irq(shost->host_lock);
5806  mempool_free(mboxq, phba->mbox_mem_pool);
5807  return;
5808 }
5809 
5817 static void
5818 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5819 {
5820  struct lpfc_vport *vport = mboxq->vport;
5821 
5822  if (mboxq->u.mb.mbxStatus) {
5823  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5824  "2550 UNREG_FCFI mbxStatus error x%x "
5825  "HBA state x%x\n",
5826  mboxq->u.mb.mbxStatus, vport->port_state);
5827  }
5828  mempool_free(mboxq, phba->mbox_mem_pool);
5829  return;
5830 }
5831 
5840 int
5841 lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5842 {
5843  struct lpfc_vport **vports;
5844  struct lpfc_nodelist *ndlp;
5845  struct Scsi_Host *shost;
5846  int i, rc;
5847 
5848  /* Unregister RPIs */
5849  if (lpfc_fcf_inuse(phba))
5850  lpfc_unreg_hba_rpis(phba);
5851 
5852  /* At this point, all discovery is aborted */
5853  phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5854 
5855  /* Unregister VPIs */
5856  vports = lpfc_create_vport_work_array(phba);
5857  if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
5858  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5859  /* Stop FLOGI/FDISC retries */
5860  ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
5861  if (ndlp)
5862  lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
5863  lpfc_cleanup_pending_mbox(vports[i]);
5864  if (phba->sli_rev == LPFC_SLI_REV4)
5865  lpfc_sli4_unreg_all_rpis(vports[i]);
5866  lpfc_mbx_unreg_vpi(vports[i]);
5867  shost = lpfc_shost_from_vport(vports[i]);
5868  spin_lock_irq(shost->host_lock);
5869  vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
5870  vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
5871  spin_unlock_irq(shost->host_lock);
5872  }
5873  lpfc_destroy_vport_work_array(phba, vports);
5874 
5875  /* Cleanup any outstanding ELS commands */
5876  lpfc_els_flush_all_cmd(phba);
5877 
5878  /* Unregister the physical port VFI */
5879  rc = lpfc_issue_unreg_vfi(phba->pport);
5880  return rc;
5881 }
5882 
5893 int
5894 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
5895 {
5896  LPFC_MBOXQ_t *mbox;
5897  int rc;
5898 
5899  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5900  if (!mbox) {
5901  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5902  "2551 UNREG_FCFI mbox allocation failed "
5903  "HBA state x%x\n", phba->pport->port_state);
5904  return -ENOMEM;
5905  }
5906  lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
5907  mbox->vport = phba->pport;
5908  mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
5909  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5910 
5911  if (rc == MBX_NOT_FINISHED) {
5912  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5913  "2552 Unregister FCFI command failed rc x%x "
5914  "HBA state x%x\n",
5915  rc, phba->pport->port_state);
5916  return -EINVAL;
5917  }
5918  return 0;
5919 }
5920 
5928 void
5929 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5930 {
5931  int rc;
5932 
5933  /* Preparation for unregistering fcf */
5934  rc = lpfc_unregister_fcf_prep(phba);
5935  if (rc) {
5936  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5937  "2748 Failed to prepare for unregistering "
5938  "HBA's FCF record: rc=%d\n", rc);
5939  return;
5940  }
5941 
5942  /* Now, unregister FCF record and reset HBA FCF state */
5943  rc = lpfc_sli4_unregister_fcf(phba);
5944  if (rc)
5945  return;
5946  /* Reset HBA FCF states after successful unregister FCF */
5947  phba->fcf.fcf_flag = 0;
5948  phba->fcf.current_rec.flag = 0;
5949 
5950  /*
5951  * If driver is not unloading, check if there is any other
5952  * FCF record that can be used for discovery.
5953  */
5954  if ((phba->pport->load_flag & FC_UNLOADING) ||
5955  (phba->link_state < LPFC_LINK_UP))
5956  return;
5957 
5958  /* This is considered as the initial FCF discovery scan */
5959  spin_lock_irq(&phba->hbalock);
5960  phba->fcf.fcf_flag |= FCF_INIT_DISC;
5961  spin_unlock_irq(&phba->hbalock);
5962 
5963  /* Reset FCF roundrobin bmask for new discovery */
5964  lpfc_sli4_clear_fcf_rr_bmask(phba);
5965 
5966  rc = lpfc_sli4_redisc_fcf_table(phba);
5967 
5968  if (rc) {
5969  spin_lock_irq(&phba->hbalock);
5970  phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5971  spin_unlock_irq(&phba->hbalock);
5972  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5973  "2553 lpfc_unregister_unused_fcf failed "
5974  "to read FCF record HBA state x%x\n",
5975  phba->pport->port_state);
5976  }
5977 }
5978 
5986 void
5987 lpfc_unregister_fcf(struct lpfc_hba *phba)
5988 {
5989  int rc;
5990 
5991  /* Preparation for unregistering fcf */
5992  rc = lpfc_unregister_fcf_prep(phba);
5993  if (rc) {
5994  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5995  "2749 Failed to prepare for unregistering "
5996  "HBA's FCF record: rc=%d\n", rc);
5997  return;
5998  }
5999 
6000  /* Now, unregister FCF record and reset HBA FCF state */
6001  rc = lpfc_sli4_unregister_fcf(phba);
6002  if (rc)
6003  return;
6004  /* Set proper HBA FCF states after successful unregister FCF */
6005  spin_lock_irq(&phba->hbalock);
6006  phba->fcf.fcf_flag &= ~FCF_REGISTERED;
6007  spin_unlock_irq(&phba->hbalock);
6008 }
6009 
6018 void
6019 lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6020 {
6021  /*
6022  * If HBA is not running in FIP mode, if HBA does not support
6023  * FCoE, if FCF discovery is ongoing, or if FCF has not been
6024  * registered, do nothing.
6025  */
6026  spin_lock_irq(&phba->hbalock);
6027  if (!(phba->hba_flag & HBA_FCOE_MODE) ||
6028  !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
6029  !(phba->hba_flag & HBA_FIP_SUPPORT) ||
6030  (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
6031  (phba->pport->port_state == LPFC_FLOGI)) {
6032  spin_unlock_irq(&phba->hbalock);
6033  return;
6034  }
6035  spin_unlock_irq(&phba->hbalock);
6036 
6037  if (lpfc_fcf_inuse(phba))
6038  return;
6039 
6040  lpfc_unregister_fcf_rescan(phba);
6041 }
6042 
6051 static void
6052 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6053  uint8_t *buff)
6054 {
6055  struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6056  struct lpfc_fcf_conn_hdr *conn_hdr;
6057  struct lpfc_fcf_conn_rec *conn_rec;
6058  uint32_t record_count;
6059  int i;
6060 
6061  /* Free the current connect table */
6062  list_for_each_entry_safe(conn_entry, next_conn_entry,
6063  &phba->fcf_conn_rec_list, list) {
6064  list_del_init(&conn_entry->list);
6065  kfree(conn_entry);
6066  }
6067 
6068  conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
6069  record_count = conn_hdr->length * sizeof(uint32_t)/
6070  sizeof(struct lpfc_fcf_conn_rec);
6071 
6072  conn_rec = (struct lpfc_fcf_conn_rec *)
6073  (buff + sizeof(struct lpfc_fcf_conn_hdr));
6074 
6075  for (i = 0; i < record_count; i++) {
6076  if (!(conn_rec[i].flags & FCFCNCT_VALID))
6077  continue;
6078  conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6079  GFP_KERNEL);
6080  if (!conn_entry) {
6081  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6082  "2566 Failed to allocate connection"
6083  " table entry\n");
6084  return;
6085  }
6086 
6087  memcpy(&conn_entry->conn_rec, &conn_rec[i],
6088  sizeof(struct lpfc_fcf_conn_rec));
6089  conn_entry->conn_rec.vlan_tag =
6090  le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
6091  conn_entry->conn_rec.flags =
6092  le16_to_cpu(conn_entry->conn_rec.flags);
6093  list_add_tail(&conn_entry->list,
6094  &phba->fcf_conn_rec_list);
6095  }
6096 }
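/* Worked example of the record-count math above: the header length
 * field counts 32-bit words, so a hypothetical conn_hdr->length of 16
 * words with a 32-byte lpfc_fcf_conn_rec yields
 * record_count = 16 * 4 / 32 = 2 connection records after the header.
 */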
6097 
6106 static void
6107 lpfc_read_fcoe_param(struct lpfc_hba *phba,
6108  uint8_t *buff)
6109 {
6110  struct lpfc_fip_param_hdr *fcoe_param_hdr;
6111  struct lpfc_fcoe_params *fcoe_param;
6112 
6113  fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
6114  buff;
6115  fcoe_param = (struct lpfc_fcoe_params *)
6116  (buff + sizeof(struct lpfc_fip_param_hdr));
6117 
6118  if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
6119  (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
6120  return;
6121 
6122  if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
6123  phba->valid_vlan = 1;
6124  phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
6125  0xFFF;
6126  }
6127 
6128  phba->fc_map[0] = fcoe_param->fc_map[0];
6129  phba->fc_map[1] = fcoe_param->fc_map[1];
6130  phba->fc_map[2] = fcoe_param->fc_map[2];
6131  return;
6132 }
6133 
6144 static uint8_t *
6145 lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
6146 {
6147  uint32_t offset = 0, rec_length;
6148 
6149  if ((buff[0] == LPFC_REGION23_LAST_REC) ||
6150  (size < sizeof(uint32_t)))
6151  return NULL;
6152 
6153  rec_length = buff[offset + 1];
6154 
6155  /*
6156  * One TLV record has a one-word header plus the number of data
6157  * words specified in the rec_length field of the record header.
6158  */
6159  while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
6160  <= size) {
6161  if (buff[offset] == rec_type)
6162  return &buff[offset];
6163 
6164  if (buff[offset] == LPFC_REGION23_LAST_REC)
6165  return NULL;
6166 
6167  offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
6168  rec_length = buff[offset + 1];
6169  }
6170  return NULL;
6171 }
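/* Illustrative TLV layout assumed by the walker above (field names
 * descriptive, not from this file):
 *
 *	byte 0:                 record type
 *	byte 1:                 rec_length, in 32-bit data words
 *	next rec_length words:  record data
 *
 * Each iteration therefore advances offset by (rec_length + 1) words,
 * and an LPFC_REGION23_LAST_REC type byte terminates the scan.
 */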
6172 
6182 void
6183 lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6184  uint8_t *buff,
6185  uint32_t size)
6186 {
6187  uint32_t offset = 0, rec_length;
6188  uint8_t *rec_ptr;
6189 
6190  /*
6191  * If the data size is less than 2 words, the signature and version
6192  * cannot be verified.
6193  */
6194  if (size < 2*sizeof(uint32_t))
6195  return;
6196 
6197  /* Check the region signature first */
6198  if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
6199  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6200  "2567 Config region 23 has bad signature\n");
6201  return;
6202  }
6203 
6204  offset += 4;
6205 
6206  /* Check the data structure version */
6207  if (buff[offset] != LPFC_REGION23_VERSION) {
6208  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6209  "2568 Config region 23 has bad version\n");
6210  return;
6211  }
6212  offset += 4;
6213 
6214  rec_length = buff[offset + 1];
6215 
6216  /* Read FCoE param record */
6217  rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6218  size - offset, FCOE_PARAM_TYPE);
6219  if (rec_ptr)
6220  lpfc_read_fcoe_param(phba, rec_ptr);
6221 
6222  /* Read FCF connection table */
6223  rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6224  size - offset, FCOE_CONN_TBL_TYPE);
6225  if (rec_ptr)
6226  lpfc_read_fcf_conn_tbl(phba, rec_ptr);
6227 
6228 }