Linux Kernel  3.7.1
lpfc_nportdisc.c
1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for *
3  * Fibre Channel Host Bus Adapters. *
4  * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8  * *
9  * This program is free software; you can redistribute it and/or *
10  * modify it under the terms of version 2 of the GNU General *
11  * Public License as published by the Free Software Foundation. *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID. See the GNU General Public License for *
18  * more details, a copy of which can be found in the file COPYING *
19  * included with this package. *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 
46 /* Called to verify a rcv'ed ADISC was intended for us. */
47 static int
48 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49  struct lpfc_name *nn, struct lpfc_name *pn)
50 {
51  /* First, we MUST have a RPI registered */
52  if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53  return 0;
54 
55  /* Compare the ADISC rsp WWNN / WWPN matches our internal node
56  * table entry for that node.
57  */
58  if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59  return 0;
60 
61  if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62  return 0;
63 
64  /* we match, return success */
65  return 1;
66 }
67 
68 int
69 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70  struct serv_parm *sp, uint32_t class, int flogi)
71 {
72  volatile struct serv_parm *hsp = &vport->fc_sparam;
73  uint16_t hsp_value, ssp_value = 0;
74 
75  /*
76  * The receive data field size and buffer-to-buffer receive data field
77  * size entries are 16 bits but are represented as two 8-bit fields in
78  * the driver data structure to account for rsvd bits and other control
79  * bits. Reconstruct and compare the fields as 16-bit values before
80  * correcting the byte values.
81  */
82  if (sp->cls1.classValid) {
83  if (!flogi) {
84  hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85  hsp->cls1.rcvDataSizeLsb);
86  ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87  sp->cls1.rcvDataSizeLsb);
88  if (!ssp_value)
89  goto bad_service_param;
90  if (ssp_value > hsp_value) {
91  sp->cls1.rcvDataSizeLsb =
92  hsp->cls1.rcvDataSizeLsb;
93  sp->cls1.rcvDataSizeMsb =
94  hsp->cls1.rcvDataSizeMsb;
95  }
96  }
97  } else if (class == CLASS1)
98  goto bad_service_param;
99  if (sp->cls2.classValid) {
100  if (!flogi) {
101  hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102  hsp->cls2.rcvDataSizeLsb);
103  ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104  sp->cls2.rcvDataSizeLsb);
105  if (!ssp_value)
106  goto bad_service_param;
107  if (ssp_value > hsp_value) {
108  sp->cls2.rcvDataSizeLsb =
109  hsp->cls2.rcvDataSizeLsb;
110  sp->cls2.rcvDataSizeMsb =
111  hsp->cls2.rcvDataSizeMsb;
112  }
113  }
114  } else if (class == CLASS2)
115  goto bad_service_param;
116  if (sp->cls3.classValid) {
117  if (!flogi) {
118  hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119  hsp->cls3.rcvDataSizeLsb);
120  ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121  sp->cls3.rcvDataSizeLsb);
122  if (!ssp_value)
123  goto bad_service_param;
124  if (ssp_value > hsp_value) {
125  sp->cls3.rcvDataSizeLsb =
126  hsp->cls3.rcvDataSizeLsb;
127  sp->cls3.rcvDataSizeMsb =
128  hsp->cls3.rcvDataSizeMsb;
129  }
130  }
131  } else if (class == CLASS3)
132  goto bad_service_param;
133 
134  /*
135  * Preserve the upper four bits of the MSB from the PLOGI response.
136  * These bits contain the Buffer-to-Buffer State Change Number
137  * from the target and need to be passed to the FW.
138  */
139  hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140  ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141  if (ssp_value > hsp_value) {
142  sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143  sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144  (hsp->cmn.bbRcvSizeMsb & 0x0F);
145  }
146 
147  memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148  memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149  return 1;
150 bad_service_param:
151  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152  "0207 Device %x "
153  "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154  "invalid service parameters. Ignoring device.\n",
155  ndlp->nlp_DID,
156  sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157  sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158  sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159  sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160  return 0;
161 }
162 
163 static void *
164 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165  struct lpfc_iocbq *rspiocb)
166 {
167  struct lpfc_dmabuf *pcmd, *prsp;
168  uint32_t *lp;
169  void *ptr = NULL;
170  IOCB_t *irsp;
171 
172  irsp = &rspiocb->iocb;
173  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174 
175  /* For lpfc_els_abort, context2 could be zero'ed to delay
176  * freeing associated memory till after ABTS completes.
177  */
178  if (pcmd) {
179  prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
180  list);
181  if (prsp) {
182  lp = (uint32_t *) prsp->virt;
183  ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184  }
185  } else {
186  /* Force ulpStatus error since we are returning NULL ptr */
187  if (!(irsp->ulpStatus)) {
188  irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189  irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190  }
191  ptr = NULL;
192  }
193  return ptr;
194 }
195 
196 
197 
198 /*
199  * Free resources / clean up outstanding I/Os
200  * associated with a LPFC_NODELIST entry. This
201  * routine effectively results in a "software abort".
202  */
203 int
204 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205 {
206  LIST_HEAD(completions);
207  LIST_HEAD(txcmplq_completions);
208  LIST_HEAD(abort_list);
209  struct lpfc_sli *psli = &phba->sli;
210  struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
211  struct lpfc_iocbq *iocb, *next_iocb;
212 
213  /* Abort outstanding I/O on NPort <nlp_DID> */
214  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
215  "2819 Abort outstanding I/O on NPort x%x "
216  "Data: x%x x%x x%x\n",
217  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
218  ndlp->nlp_rpi);
219 
220  lpfc_fabric_abort_nport(ndlp);
221 
222  /* First check the txq */
223  spin_lock_irq(&phba->hbalock);
224  list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
225  /* Check to see if iocb matches the nport we are looking for */
226  if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
227  /* It matches, so dequeue and call compl with an error */
228  list_move_tail(&iocb->list, &completions);
229  pring->txq_cnt--;
230  }
231  }
232 
233  /* Next check the txcmplq */
234  list_splice_init(&pring->txcmplq, &txcmplq_completions);
235  spin_unlock_irq(&phba->hbalock);
236 
237  list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
238  /* Check to see if iocb matches the nport we are looking for */
239  if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
240  list_add_tail(&iocb->dlist, &abort_list);
241  }
242  spin_lock_irq(&phba->hbalock);
243  list_splice(&txcmplq_completions, &pring->txcmplq);
244  spin_unlock_irq(&phba->hbalock);
245 
246  list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
247  spin_lock_irq(&phba->hbalock);
248  list_del_init(&iocb->dlist);
249  lpfc_sli_issue_abort_iotag(phba, pring, iocb);
250  spin_unlock_irq(&phba->hbalock);
251  }
252 
253  /* Cancel all the IOCBs from the completions list */
254  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
255  IOERR_SLI_ABORTED);
256 
257  lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
258  return 0;
259 }
260 
261 static int
262 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
263  struct lpfc_iocbq *cmdiocb)
264 {
265  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
266  struct lpfc_hba *phba = vport->phba;
267  struct lpfc_dmabuf *pcmd;
268  uint32_t *lp;
269  IOCB_t *icmd;
270  struct serv_parm *sp;
271  LPFC_MBOXQ_t *mbox;
272  struct ls_rjt stat;
273  int rc;
274 
275  memset(&stat, 0, sizeof (struct ls_rjt));
276  if (vport->port_state <= LPFC_FDISC) {
277  /* Before responding to PLOGI, check for pt2pt mode.
278  * If we are pt2pt, with an outstanding FLOGI, abort
279  * the FLOGI and resend it first.
280  */
281  if (vport->fc_flag & FC_PT2PT) {
282  lpfc_els_abort_flogi(phba);
283  if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
284  /* If the other side is supposed to initiate
285  * the PLOGI anyway, just ACC it now and
286  * move on with discovery.
287  */
288  phba->fc_edtov = FF_DEF_EDTOV;
289  phba->fc_ratov = FF_DEF_RATOV;
290  /* Start discovery - this should just do
291  CLEAR_LA */
292  lpfc_disc_start(vport);
293  } else
294  lpfc_initial_flogi(vport);
295  } else {
296  stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
297  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
298  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
299  ndlp, NULL);
300  return 0;
301  }
302  }
303  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
304  lp = (uint32_t *) pcmd->virt;
305  sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
306  if (wwn_to_u64(sp->portName.u.wwn) == 0) {
307  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
308  "0140 PLOGI Reject: invalid nname\n");
309  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
310  stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
311  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
312  NULL);
313  return 0;
314  }
315  if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
316  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
317  "0141 PLOGI Reject: invalid pname\n");
318  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
319  stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
320  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
321  NULL);
322  return 0;
323  }
324  if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
325  /* Reject this request because invalid parameters */
326  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
327  stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
328  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
329  NULL);
330  return 0;
331  }
332  icmd = &cmdiocb->iocb;
333 
334  /* PLOGI chkparm OK */
335  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
336  "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
337  ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
338  ndlp->nlp_rpi);
339 
340  if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
341  ndlp->nlp_fcp_info |= CLASS2;
342  else
343  ndlp->nlp_fcp_info |= CLASS3;
344 
345  ndlp->nlp_class_sup = 0;
346  if (sp->cls1.classValid)
347  ndlp->nlp_class_sup |= FC_COS_CLASS1;
348  if (sp->cls2.classValid)
349  ndlp->nlp_class_sup |= FC_COS_CLASS2;
350  if (sp->cls3.classValid)
351  ndlp->nlp_class_sup |= FC_COS_CLASS3;
352  if (sp->cls4.classValid)
353  ndlp->nlp_class_sup |= FC_COS_CLASS4;
354  ndlp->nlp_maxframe =
355  ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
356 
357  /* no need to reg_login if we are already in one of these states */
358  switch (ndlp->nlp_state) {
359  case NLP_STE_NPR_NODE:
360  if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
361  break;
362  case NLP_STE_REG_LOGIN_ISSUE:
363  case NLP_STE_PRLI_ISSUE:
364  case NLP_STE_UNMAPPED_NODE:
365  case NLP_STE_MAPPED_NODE:
366  lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
367  return 1;
368  }
369 
370  /* Check for Nport to NPort pt2pt protocol */
371  if ((vport->fc_flag & FC_PT2PT) &&
372  !(vport->fc_flag & FC_PT2PT_PLOGI)) {
373 
374  /* rcv'ed PLOGI decides what our NPortId will be */
375  vport->fc_myDID = icmd->un.rcvels.parmRo;
376  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
377  if (mbox == NULL)
378  goto out;
379  lpfc_config_link(phba, mbox);
380  mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
381  mbox->vport = vport;
382  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
383  if (rc == MBX_NOT_FINISHED) {
384  mempool_free(mbox, phba->mbox_mem_pool);
385  goto out;
386  }
387  /*
388  * For SLI4, the VFI/VPI are registered AFTER the
389  * Nport with the higher WWPN sends us a PLOGI with
390  * our assigned NPortId.
391  */
392  if (phba->sli_rev == LPFC_SLI_REV4)
393  lpfc_issue_reg_vfi(vport);
394 
395  lpfc_can_disctmo(vport);
396  }
397  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
398  if (!mbox)
399  goto out;
400 
401  /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
402  if (phba->sli_rev == LPFC_SLI_REV4)
403  lpfc_unreg_rpi(vport, ndlp);
404 
405  rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
406  (uint8_t *) sp, mbox, ndlp->nlp_rpi);
407  if (rc) {
408  mempool_free(mbox, phba->mbox_mem_pool);
409  goto out;
410  }
411 
412  /* ACC PLOGI rsp command needs to execute first,
413  * queue this mbox command to be processed later.
414  */
415  mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
416  /*
417  * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
418  * command issued in lpfc_cmpl_els_acc().
419  */
420  mbox->vport = vport;
421  spin_lock_irq(shost->host_lock);
422  ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
423  spin_unlock_irq(shost->host_lock);
424 
425  /*
426  * If there is an outstanding PLOGI issued, abort it before
427  * sending ACC rsp for received PLOGI. If pending plogi
428  * is not canceled here, the plogi will be rejected by
429  * remote port and will be retried. On a configuration with
430  * single discovery thread, this will cause a huge delay in
431  * discovery. Also this will cause multiple state machines
432  * running in parallel for this node.
433  */
434  if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
435  /* software abort outstanding PLOGI */
436  lpfc_els_abort(phba, ndlp);
437  }
438 
439  if ((vport->port_type == LPFC_NPIV_PORT &&
440  vport->cfg_restrict_login)) {
441 
442  /* In order to preserve RPIs, we want to cleanup
443  * the default RPI the firmware created to rcv
444  * this ELS request. The only way to do this is
445  * to register, then unregister the RPI.
446  */
447  spin_lock_irq(shost->host_lock);
448  ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
449  spin_unlock_irq(shost->host_lock);
450  stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
451  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
452  rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
453  ndlp, mbox);
454  if (rc)
455  mempool_free(mbox, phba->mbox_mem_pool);
456  return 1;
457  }
458  rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
459  if (rc)
460  mempool_free(mbox, phba->mbox_mem_pool);
461  return 1;
462 out:
463  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
464  stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
465  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
466  return 0;
467 }
468 
477 static void
478 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
479 {
480  struct lpfc_vport *vport;
481  struct lpfc_iocbq *elsiocb;
482  struct lpfc_nodelist *ndlp;
483  uint32_t cmd;
484 
485  elsiocb = (struct lpfc_iocbq *)mboxq->context1;
486  ndlp = (struct lpfc_nodelist *) mboxq->context2;
487  vport = mboxq->vport;
488  cmd = elsiocb->drvrTimeout;
489 
490  if (cmd == ELS_CMD_ADISC) {
491  lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
492  } else {
493  lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
494  ndlp, NULL);
495  }
496  kfree(elsiocb);
497  mempool_free(mboxq, phba->mbox_mem_pool);
498 }
499 
500 static int
501 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
502  struct lpfc_iocbq *cmdiocb)
503 {
504  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
505  struct lpfc_iocbq *elsiocb;
506  struct lpfc_dmabuf *pcmd;
507  struct serv_parm *sp;
508  struct lpfc_name *pnn, *ppn;
509  struct ls_rjt stat;
510  ADISC *ap;
511  IOCB_t *icmd;
512  uint32_t *lp;
513  uint32_t cmd;
514 
515  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
516  lp = (uint32_t *) pcmd->virt;
517 
518  cmd = *lp++;
519  if (cmd == ELS_CMD_ADISC) {
520  ap = (ADISC *) lp;
521  pnn = (struct lpfc_name *) & ap->nodeName;
522  ppn = (struct lpfc_name *) & ap->portName;
523  } else {
524  sp = (struct serv_parm *) lp;
525  pnn = (struct lpfc_name *) & sp->nodeName;
526  ppn = (struct lpfc_name *) & sp->portName;
527  }
528 
529  icmd = &cmdiocb->iocb;
530  if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
531 
532  /*
533  * As soon as we send ACC, the remote NPort can
534  * start sending us data. Thus, for SLI4 we must
535  * resume the RPI before the ACC goes out.
536  */
537  if (vport->phba->sli_rev == LPFC_SLI_REV4) {
538  elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
539  GFP_KERNEL);
540  if (elsiocb) {
541 
542  /* Save info from cmd IOCB used in rsp */
543  memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
544  sizeof(struct lpfc_iocbq));
545 
546  /* Save the ELS cmd */
547  elsiocb->drvrTimeout = cmd;
548 
549  lpfc_sli4_resume_rpi(ndlp,
550  lpfc_mbx_cmpl_resume_rpi, elsiocb);
551  goto out;
552  }
553  }
554 
555  if (cmd == ELS_CMD_ADISC) {
556  lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
557  } else {
558  lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
559  ndlp, NULL);
560  }
561 out:
562  /* If we are authenticated, move to the proper state */
563  if (ndlp->nlp_type & NLP_FCP_TARGET)
564  lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
565  else
566  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
567 
568  return 1;
569  }
570  /* Reject this request because invalid parameters */
571  stat.un.b.lsRjtRsvd0 = 0;
572  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
573  stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
574  stat.un.b.vendorUnique = 0;
575  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
576 
577  /* 1 sec timeout */
578  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
579 
580  spin_lock_irq(shost->host_lock);
581  ndlp->nlp_flag |= NLP_DELAY_TMO;
582  spin_unlock_irq(shost->host_lock);
583  ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
584  ndlp->nlp_prev_state = ndlp->nlp_state;
585  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
586  return 0;
587 }
588 
589 static int
590 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
591  struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
592 {
593  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
594  struct lpfc_hba *phba = vport->phba;
595  struct lpfc_vport **vports;
596  int i, active_vlink_present = 0 ;
597 
598  /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
599  /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
600  * PLOGIs during LOGO storms from a device.
601  */
602  spin_lock_irq(shost->host_lock);
603  ndlp->nlp_flag |= NLP_LOGO_ACC;
604  spin_unlock_irq(shost->host_lock);
605  if (els_cmd == ELS_CMD_PRLO)
606  lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
607  else
608  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
609  if (ndlp->nlp_DID == Fabric_DID) {
610  if (vport->port_state <= LPFC_FDISC)
611  goto out;
612  lpfc_linkdown_port(vport);
613  spin_lock_irq(shost->host_lock);
614  vport->fc_flag |= FC_VPORT_LOGO_RCVD;
615  spin_unlock_irq(shost->host_lock);
616  vports = lpfc_create_vport_work_array(phba);
617  if (vports) {
618  for (i = 0; i <= phba->max_vports && vports[i] != NULL;
619  i++) {
620  if ((!(vports[i]->fc_flag &
621  FC_VPORT_LOGO_RCVD)) &&
622  (vports[i]->port_state > LPFC_FDISC)) {
623  active_vlink_present = 1;
624  break;
625  }
626  }
627  lpfc_destroy_vport_work_array(phba, vports);
628  }
629 
630  if (active_vlink_present) {
631  /*
632  * If there are other active VLinks present,
633  * re-instantiate the Vlink using FDISC.
634  */
635  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
636  spin_lock_irq(shost->host_lock);
637  ndlp->nlp_flag |= NLP_DELAY_TMO;
638  spin_unlock_irq(shost->host_lock);
639  ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
640  vport->port_state = LPFC_FDISC;
641  } else {
642  spin_lock_irq(shost->host_lock);
643  phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
644  spin_unlock_irq(shost->host_lock);
645  lpfc_retry_pport_discovery(phba);
646  }
647  } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
648  ((ndlp->nlp_type & NLP_FCP_TARGET) ||
649  !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
650  (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
651  /* Only try to re-login if this is NOT a Fabric Node */
652  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
653  spin_lock_irq(shost->host_lock);
654  ndlp->nlp_flag |= NLP_DELAY_TMO;
655  spin_unlock_irq(shost->host_lock);
656 
657  ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
658  }
659 out:
660  ndlp->nlp_prev_state = ndlp->nlp_state;
661  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
662 
663  spin_lock_irq(shost->host_lock);
664  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
665  spin_unlock_irq(shost->host_lock);
666  /* The driver has to wait until the ACC completes before it continues
667  * processing the LOGO. The action will resume in
668  * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
669  * unreg_login, the driver waits so the ACC does not get aborted.
670  */
671  return 0;
672 }
673 
674 static void
675 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
676  struct lpfc_iocbq *cmdiocb)
677 {
678  struct lpfc_dmabuf *pcmd;
679  uint32_t *lp;
680  PRLI *npr;
681  struct fc_rport *rport = ndlp->rport;
682  u32 roles;
683 
684  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
685  lp = (uint32_t *) pcmd->virt;
686  npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
687 
688  ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
689  ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
690  if (npr->prliType == PRLI_FCP_TYPE) {
691  if (npr->initiatorFunc)
692  ndlp->nlp_type |= NLP_FCP_INITIATOR;
693  if (npr->targetFunc)
694  ndlp->nlp_type |= NLP_FCP_TARGET;
695  if (npr->Retry)
696  ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
697  }
698  if (rport) {
699  /* We need to update the rport role values */
700  roles = FC_RPORT_ROLE_UNKNOWN;
701  if (ndlp->nlp_type & NLP_FCP_INITIATOR)
702  roles |= FC_RPORT_ROLE_FCP_INITIATOR;
703  if (ndlp->nlp_type & NLP_FCP_TARGET)
704  roles |= FC_RPORT_ROLE_FCP_TARGET;
705 
706  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
707  "rport rolechg: role:x%x did:x%x flg:x%x",
708  roles, ndlp->nlp_DID, ndlp->nlp_flag);
709 
710  fc_remote_port_rolechg(rport, roles);
711  }
712 }
713 
714 static uint32_t
715 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
716 {
717  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
718 
719  if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
720  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
721  return 0;
722  }
723 
724  if (!(vport->fc_flag & FC_PT2PT)) {
725  /* Check config parameter use-adisc or FCP-2 */
726  if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
727  ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
728  (ndlp->nlp_type & NLP_FCP_TARGET))) {
729  spin_lock_irq(shost->host_lock);
730  ndlp->nlp_flag |= NLP_NPR_ADISC;
731  spin_unlock_irq(shost->host_lock);
732  return 1;
733  }
734  }
735  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
736  lpfc_unreg_rpi(vport, ndlp);
737  return 0;
738 }
739 
749 void
750 lpfc_release_rpi(struct lpfc_hba *phba,
751  struct lpfc_vport *vport,
752  uint16_t rpi)
753 {
754  LPFC_MBOXQ_t *pmb;
755  int rc;
756 
757  pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
758  GFP_KERNEL);
759  if (!pmb)
760  lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
761  "2796 mailbox memory allocation failed \n");
762  else {
763  lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
764  pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
765  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
766  if (rc == MBX_NOT_FINISHED)
767  mempool_free(pmb, phba->mbox_mem_pool);
768  }
769 }
770 
771 static uint32_t
772 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
773  void *arg, uint32_t evt)
774 {
775  struct lpfc_hba *phba;
776  LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
777  MAILBOX_t *mb;
778  uint16_t rpi;
779 
780  phba = vport->phba;
781  /* Release the RPI if reglogin completing */
782  if (!(phba->pport->load_flag & FC_UNLOADING) &&
783  (evt == NLP_EVT_CMPL_REG_LOGIN) &&
784  (!pmb->u.mb.mbxStatus)) {
785  mb = &pmb->u.mb;
786  rpi = pmb->u.mb.un.varWords[0];
787  lpfc_release_rpi(phba, vport, rpi);
788  }
789  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
790  "0271 Illegal State Transition: node x%x "
791  "event x%x, state x%x Data: x%x x%x\n",
792  ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
793  ndlp->nlp_flag);
794  return ndlp->nlp_state;
795 }
796 
797 static uint32_t
798 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
799  void *arg, uint32_t evt)
800 {
801  /* This transition is only legal if we previously
802  * rcv'ed a PLOGI. Since we don't want 2 discovery threads
803  * working on the same NPortID, do nothing for this thread
804  * to stop it.
805  */
806  if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
807  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
808  "0272 Illegal State Transition: node x%x "
809  "event x%x, state x%x Data: x%x x%x\n",
810  ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
811  ndlp->nlp_flag);
812  }
813  return ndlp->nlp_state;
814 }
815 
816 /* Start of Discovery State Machine routines */
817 
818 static uint32_t
819 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
820  void *arg, uint32_t evt)
821 {
822  struct lpfc_iocbq *cmdiocb;
823 
824  cmdiocb = (struct lpfc_iocbq *) arg;
825 
826  if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
827  return ndlp->nlp_state;
828  }
829  return NLP_STE_FREED_NODE;
830 }
831 
832 static uint32_t
833 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
834  void *arg, uint32_t evt)
835 {
836  lpfc_issue_els_logo(vport, ndlp, 0);
837  return ndlp->nlp_state;
838 }
839 
840 static uint32_t
841 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
842  void *arg, uint32_t evt)
843 {
844  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
845  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
846 
847  spin_lock_irq(shost->host_lock);
848  ndlp->nlp_flag |= NLP_LOGO_ACC;
849  spin_unlock_irq(shost->host_lock);
850  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
851 
852  return ndlp->nlp_state;
853 }
854 
855 static uint32_t
856 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
857  void *arg, uint32_t evt)
858 {
859  return NLP_STE_FREED_NODE;
860 }
861 
862 static uint32_t
863 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
864  void *arg, uint32_t evt)
865 {
866  return NLP_STE_FREED_NODE;
867 }
868 
869 static uint32_t
870 lpfc_device_recov_unused_node(struct lpfc_vport *vport,
871  struct lpfc_nodelist *ndlp,
872  void *arg, uint32_t evt)
873 {
874  return ndlp->nlp_state;
875 }
876 
877 static uint32_t
878 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
879  void *arg, uint32_t evt)
880 {
881  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
882  struct lpfc_hba *phba = vport->phba;
883  struct lpfc_iocbq *cmdiocb = arg;
884  struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
885  uint32_t *lp = (uint32_t *) pcmd->virt;
886  struct serv_parm *sp = (struct serv_parm *) (lp + 1);
887  struct ls_rjt stat;
888  int port_cmp;
889 
890  memset(&stat, 0, sizeof (struct ls_rjt));
891 
892  /* For a PLOGI, we only accept if our portname is less
893  * than the remote portname.
894  */
895  phba->fc_stat.elsLogiCol++;
896  port_cmp = memcmp(&vport->fc_portname, &sp->portName,
897  sizeof(struct lpfc_name));
898 
899  if (port_cmp >= 0) {
900  /* Reject this request because the remote node will accept
901  ours */
902  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
903  stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
904  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
905  NULL);
906  } else {
907  if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
908  (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
909  (vport->num_disc_nodes)) {
910  spin_lock_irq(shost->host_lock);
911  ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
912  spin_unlock_irq(shost->host_lock);
913  /* Check if there are more PLOGIs to be sent */
914  lpfc_more_plogi(vport);
915  if (vport->num_disc_nodes == 0) {
916  spin_lock_irq(shost->host_lock);
917  vport->fc_flag &= ~FC_NDISC_ACTIVE;
918  spin_unlock_irq(shost->host_lock);
919  lpfc_can_disctmo(vport);
920  lpfc_end_rscn(vport);
921  }
922  }
923  } /* If our portname was less */
924 
925  return ndlp->nlp_state;
926 }
927 
928 static uint32_t
929 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
930  void *arg, uint32_t evt)
931 {
932  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
933  struct ls_rjt stat;
934 
935  memset(&stat, 0, sizeof (struct ls_rjt));
936  stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
937  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
938  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
939  return ndlp->nlp_state;
940 }
941 
942 static uint32_t
943 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
944  void *arg, uint32_t evt)
945 {
946  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
947 
948  /* software abort outstanding PLOGI */
949  lpfc_els_abort(vport->phba, ndlp);
950 
951  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
952  return ndlp->nlp_state;
953 }
954 
955 static uint32_t
956 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
957  void *arg, uint32_t evt)
958 {
959  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
960  struct lpfc_hba *phba = vport->phba;
961  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
962 
963  /* software abort outstanding PLOGI */
964  lpfc_els_abort(phba, ndlp);
965 
966  if (evt == NLP_EVT_RCV_LOGO) {
967  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
968  } else {
969  lpfc_issue_els_logo(vport, ndlp, 0);
970  }
971 
972  /* Put ndlp in npr state set plogi timer for 1 sec */
973  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
974  spin_lock_irq(shost->host_lock);
975  ndlp->nlp_flag |= NLP_DELAY_TMO;
976  spin_unlock_irq(shost->host_lock);
977  ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
978  ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
979  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
980 
981  return ndlp->nlp_state;
982 }
983 
984 static uint32_t
985 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
986  struct lpfc_nodelist *ndlp,
987  void *arg,
988  uint32_t evt)
989 {
990  struct lpfc_hba *phba = vport->phba;
991  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
992  struct lpfc_iocbq *cmdiocb, *rspiocb;
993  struct lpfc_dmabuf *pcmd, *prsp, *mp;
994  uint32_t *lp;
995  IOCB_t *irsp;
996  struct serv_parm *sp;
997  LPFC_MBOXQ_t *mbox;
998 
999  cmdiocb = (struct lpfc_iocbq *) arg;
1000  rspiocb = cmdiocb->context_un.rsp_iocb;
1001 
1002  if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1003  /* Recovery from PLOGI collision logic */
1004  return ndlp->nlp_state;
1005  }
1006 
1007  irsp = &rspiocb->iocb;
1008 
1009  if (irsp->ulpStatus)
1010  goto out;
1011 
1012  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1013 
1014  prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1015 
1016  lp = (uint32_t *) prsp->virt;
1017  sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1018 
1019  /* Some switches have FDMI servers returning 0 for WWN */
1020  if ((ndlp->nlp_DID != FDMI_DID) &&
1021  (wwn_to_u64(sp->portName.u.wwn) == 0 ||
1022  wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1023  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1024  "0142 PLOGI RSP: Invalid WWN.\n");
1025  goto out;
1026  }
1027  if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1028  goto out;
1029  /* PLOGI chkparm OK */
1030  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1031  "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1032  ndlp->nlp_DID, ndlp->nlp_state,
1033  ndlp->nlp_flag, ndlp->nlp_rpi);
1034  if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1035  ndlp->nlp_fcp_info |= CLASS2;
1036  else
1037  ndlp->nlp_fcp_info |= CLASS3;
1038 
1039  ndlp->nlp_class_sup = 0;
1040  if (sp->cls1.classValid)
1041  ndlp->nlp_class_sup |= FC_COS_CLASS1;
1042  if (sp->cls2.classValid)
1043  ndlp->nlp_class_sup |= FC_COS_CLASS2;
1044  if (sp->cls3.classValid)
1045  ndlp->nlp_class_sup |= FC_COS_CLASS3;
1046  if (sp->cls4.classValid)
1047  ndlp->nlp_class_sup |= FC_COS_CLASS4;
1048  ndlp->nlp_maxframe =
1049  ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1050 
1051  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1052  if (!mbox) {
1053  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1054  "0133 PLOGI: no memory for reg_login "
1055  "Data: x%x x%x x%x x%x\n",
1056  ndlp->nlp_DID, ndlp->nlp_state,
1057  ndlp->nlp_flag, ndlp->nlp_rpi);
1058  goto out;
1059  }
1060 
1061  lpfc_unreg_rpi(vport, ndlp);
1062 
1063  if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1064  (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1065  switch (ndlp->nlp_DID) {
1066  case NameServer_DID:
1067  mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1068  break;
1069  case FDMI_DID:
1070  mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1071  break;
1072  default:
1073  ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1074  mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1075  }
1076  mbox->context2 = lpfc_nlp_get(ndlp);
1077  mbox->vport = vport;
1078  if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1079  != MBX_NOT_FINISHED) {
1080  lpfc_nlp_set_state(vport, ndlp,
1081  NLP_STE_REG_LOGIN_ISSUE);
1082  return ndlp->nlp_state;
1083  }
1084  if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1085  ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1086  /* decrement node reference count to the failed mbox
1087  * command
1088  */
1089  lpfc_nlp_put(ndlp);
1090  mp = (struct lpfc_dmabuf *) mbox->context1;
1091  lpfc_mbuf_free(phba, mp->virt, mp->phys);
1092  kfree(mp);
1093  mempool_free(mbox, phba->mbox_mem_pool);
1094 
1095  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1096  "0134 PLOGI: cannot issue reg_login "
1097  "Data: x%x x%x x%x x%x\n",
1098  ndlp->nlp_DID, ndlp->nlp_state,
1099  ndlp->nlp_flag, ndlp->nlp_rpi);
1100  } else {
1101  mempool_free(mbox, phba->mbox_mem_pool);
1102 
1103  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1104  "0135 PLOGI: cannot format reg_login "
1105  "Data: x%x x%x x%x x%x\n",
1106  ndlp->nlp_DID, ndlp->nlp_state,
1107  ndlp->nlp_flag, ndlp->nlp_rpi);
1108  }
1109 
1110 
1111 out:
1112  if (ndlp->nlp_DID == NameServer_DID) {
1113  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1114  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1115  "0261 Cannot Register NameServer login\n");
1116  }
1117 
1118  spin_lock_irq(shost->host_lock);
1119  ndlp->nlp_flag |= NLP_DEFER_RM;
1120  spin_unlock_irq(shost->host_lock);
1121  return NLP_STE_FREED_NODE;
1122 }
1123 
1124 static uint32_t
1125 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1126  void *arg, uint32_t evt)
1127 {
1128  return ndlp->nlp_state;
1129 }
1130 
1131 static uint32_t
1132 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1133  struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1134 {
1135  struct lpfc_hba *phba;
1136  LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1137  MAILBOX_t *mb = &pmb->u.mb;
1138  uint16_t rpi;
1139 
1140  phba = vport->phba;
1141  /* Release the RPI */
1142  if (!(phba->pport->load_flag & FC_UNLOADING) &&
1143  !mb->mbxStatus) {
1144  rpi = pmb->u.mb.un.varWords[0];
1145  lpfc_release_rpi(phba, vport, rpi);
1146  }
1147  return ndlp->nlp_state;
1148 }
1149 
1150 static uint32_t
1151 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1152  void *arg, uint32_t evt)
1153 {
1154  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1155 
1156  if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1157  spin_lock_irq(shost->host_lock);
1158  ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1159  spin_unlock_irq(shost->host_lock);
1160  return ndlp->nlp_state;
1161  } else {
1162  /* software abort outstanding PLOGI */
1163  lpfc_els_abort(vport->phba, ndlp);
1164 
1165  lpfc_drop_node(vport, ndlp);
1166  return NLP_STE_FREED_NODE;
1167  }
1168 }
1169 
1170 static uint32_t
1171 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1172  struct lpfc_nodelist *ndlp,
1173  void *arg,
1174  uint32_t evt)
1175 {
1176  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177  struct lpfc_hba *phba = vport->phba;
1178 
1179  /* Don't do anything that will mess up processing of the
1180  * previous RSCN.
1181  */
1182  if (vport->fc_flag & FC_RSCN_DEFERRED)
1183  return ndlp->nlp_state;
1184 
1185  /* software abort outstanding PLOGI */
1186  lpfc_els_abort(phba, ndlp);
1187 
1188  ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1189  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1190  spin_lock_irq(shost->host_lock);
1191  ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1192  spin_unlock_irq(shost->host_lock);
1193 
1194  return ndlp->nlp_state;
1195 }
1196 
1197 static uint32_t
1198 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1199  void *arg, uint32_t evt)
1200 {
1201  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1202  struct lpfc_hba *phba = vport->phba;
1203  struct lpfc_iocbq *cmdiocb;
1204 
1205  /* software abort outstanding ADISC */
1206  lpfc_els_abort(phba, ndlp);
1207 
1208  cmdiocb = (struct lpfc_iocbq *) arg;
1209 
1210  if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1211  if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1212  spin_lock_irq(shost->host_lock);
1213  ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1214  spin_unlock_irq(shost->host_lock);
1215  if (vport->num_disc_nodes)
1216  lpfc_more_adisc(vport);
1217  }
1218  return ndlp->nlp_state;
1219  }
1220  ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1221  lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1222  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1223 
1224  return ndlp->nlp_state;
1225 }
1226 
1227 static uint32_t
1228 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1229  void *arg, uint32_t evt)
1230 {
1231  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1232 
1233  lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1234  return ndlp->nlp_state;
1235 }
1236 
1237 static uint32_t
1238 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1239  void *arg, uint32_t evt)
1240 {
1241  struct lpfc_hba *phba = vport->phba;
1242  struct lpfc_iocbq *cmdiocb;
1243 
1244  cmdiocb = (struct lpfc_iocbq *) arg;
1245 
1246  /* software abort outstanding ADISC */
1247  lpfc_els_abort(phba, ndlp);
1248 
1249  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1250  return ndlp->nlp_state;
1251 }
1252 
1253 static uint32_t
1254 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1255  struct lpfc_nodelist *ndlp,
1256  void *arg, uint32_t evt)
1257 {
1258  struct lpfc_iocbq *cmdiocb;
1259 
1260  cmdiocb = (struct lpfc_iocbq *) arg;
1261 
1262  lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1263  return ndlp->nlp_state;
1264 }
1265 
1266 static uint32_t
1267 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1268  void *arg, uint32_t evt)
1269 {
1270  struct lpfc_iocbq *cmdiocb;
1271 
1272  cmdiocb = (struct lpfc_iocbq *) arg;
1273 
1274  /* Treat like rcv logo */
1275  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1276  return ndlp->nlp_state;
1277 }
1278 
1279 static uint32_t
1280 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1281  struct lpfc_nodelist *ndlp,
1282  void *arg, uint32_t evt)
1283 {
1284  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1285  struct lpfc_hba *phba = vport->phba;
1286  struct lpfc_iocbq *cmdiocb, *rspiocb;
1287  IOCB_t *irsp;
1288  ADISC *ap;
1289  int rc;
1290 
1291  cmdiocb = (struct lpfc_iocbq *) arg;
1292  rspiocb = cmdiocb->context_un.rsp_iocb;
1293 
1294  ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1295  irsp = &rspiocb->iocb;
1296 
1297  if ((irsp->ulpStatus) ||
1298  (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1299  /* 1 sec timeout */
1300  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1301  spin_lock_irq(shost->host_lock);
1302  ndlp->nlp_flag |= NLP_DELAY_TMO;
1303  spin_unlock_irq(shost->host_lock);
1304  ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1305 
1306  memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1307  memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1308 
1309  ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1310  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1311  lpfc_unreg_rpi(vport, ndlp);
1312  return ndlp->nlp_state;
1313  }
1314 
1315  if (phba->sli_rev == LPFC_SLI_REV4) {
1316  rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1317  if (rc) {
1318  /* Stay in state and retry. */
1319  ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1320  return ndlp->nlp_state;
1321  }
1322  }
1323 
1324  if (ndlp->nlp_type & NLP_FCP_TARGET) {
1325  ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1326  lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1327  } else {
1328  ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1329  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1330  }
1331 
1332  return ndlp->nlp_state;
1333 }
1334 
1335 static uint32_t
1336 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1337  void *arg, uint32_t evt)
1338 {
1339  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1340 
1341  if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1342  spin_lock_irq(shost->host_lock);
1343  ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1344  spin_unlock_irq(shost->host_lock);
1345  return ndlp->nlp_state;
1346  } else {
1347  /* software abort outstanding ADISC */
1348  lpfc_els_abort(vport->phba, ndlp);
1349 
1350  lpfc_drop_node(vport, ndlp);
1351  return NLP_STE_FREED_NODE;
1352  }
1353 }
1354 
1355 static uint32_t
1356 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1357  struct lpfc_nodelist *ndlp,
1358  void *arg,
1359  uint32_t evt)
1360 {
1361  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1362  struct lpfc_hba *phba = vport->phba;
1363 
1364  /* Don't do anything that will mess up processing of the
1365  * previous RSCN.
1366  */
1367  if (vport->fc_flag & FC_RSCN_DEFERRED)
1368  return ndlp->nlp_state;
1369 
1370  /* software abort outstanding ADISC */
1371  lpfc_els_abort(phba, ndlp);
1372 
1373  ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1374  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1375  spin_lock_irq(shost->host_lock);
1376  ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1377  spin_unlock_irq(shost->host_lock);
1378  lpfc_disc_set_adisc(vport, ndlp);
1379  return ndlp->nlp_state;
1380 }
1381 
1382 static uint32_t
1383 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1384  struct lpfc_nodelist *ndlp,
1385  void *arg,
1386  uint32_t evt)
1387 {
1388  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1389 
1390  lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1391  return ndlp->nlp_state;
1392 }
1393 
1394 static uint32_t
1395 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1396  struct lpfc_nodelist *ndlp,
1397  void *arg,
1398  uint32_t evt)
1399 {
1400  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1401 
1402  lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1403  return ndlp->nlp_state;
1404 }
1405 
1406 static uint32_t
1407 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1408  struct lpfc_nodelist *ndlp,
1409  void *arg,
1410  uint32_t evt)
1411 {
1412  struct lpfc_hba *phba = vport->phba;
1413  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1414  LPFC_MBOXQ_t *mb;
1415  LPFC_MBOXQ_t *nextmb;
1416  struct lpfc_dmabuf *mp;
1417 
1418  cmdiocb = (struct lpfc_iocbq *) arg;
1419 
1420  /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1421  if ((mb = phba->sli.mbox_active)) {
1422  if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1423  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1424  lpfc_nlp_put(ndlp);
1425  mb->context2 = NULL;
1426  mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1427  }
1428  }
1429 
1430  spin_lock_irq(&phba->hbalock);
1431  list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1432  if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1433  (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1434  mp = (struct lpfc_dmabuf *) (mb->context1);
1435  if (mp) {
1436  __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1437  kfree(mp);
1438  }
1439  lpfc_nlp_put(ndlp);
1440  list_del(&mb->list);
1441  phba->sli.mboxq_cnt--;
1442  mempool_free(mb, phba->mbox_mem_pool);
1443  }
1444  }
1445  spin_unlock_irq(&phba->hbalock);
1446 
1447  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1448  return ndlp->nlp_state;
1449 }
1450 
1451 static uint32_t
1452 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1453  struct lpfc_nodelist *ndlp,
1454  void *arg,
1455  uint32_t evt)
1456 {
1457  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1458 
1459  lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1460  return ndlp->nlp_state;
1461 }
1462 
1463 static uint32_t
1464 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1465  struct lpfc_nodelist *ndlp,
1466  void *arg,
1467  uint32_t evt)
1468 {
1469  struct lpfc_iocbq *cmdiocb;
1470 
1471  cmdiocb = (struct lpfc_iocbq *) arg;
1472  lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1473  return ndlp->nlp_state;
1474 }
1475 
1476 static uint32_t
1477 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1478  struct lpfc_nodelist *ndlp,
1479  void *arg,
1480  uint32_t evt)
1481 {
1482  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1483  LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1484  MAILBOX_t *mb = &pmb->u.mb;
1485  uint32_t did = mb->un.varWords[1];
1486 
1487  if (mb->mbxStatus) {
1488  /* RegLogin failed */
1489  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1490  "0246 RegLogin failed Data: x%x x%x x%x x%x "
1491  "x%x\n",
1492  did, mb->mbxStatus, vport->port_state,
1493  mb->un.varRegLogin.vpi,
1494  mb->un.varRegLogin.rpi);
1495  /*
1496  * If RegLogin failed due to lack of HBA resources do not
1497  * retry discovery.
1498  */
1499  if (mb->mbxStatus == MBXERR_RPI_FULL) {
1500  ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1501  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1502  return ndlp->nlp_state;
1503  }
1504 
1505  /* Put ndlp in npr state set plogi timer for 1 sec */
1506  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1507  spin_lock_irq(shost->host_lock);
1508  ndlp->nlp_flag |= NLP_DELAY_TMO;
1509  spin_unlock_irq(shost->host_lock);
1510  ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1511 
1512  lpfc_issue_els_logo(vport, ndlp, 0);
1513  ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1514  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1515  return ndlp->nlp_state;
1516  }
1517 
1518  /* SLI4 ports have preallocated logical rpis. */
1519  if (vport->phba->sli_rev < LPFC_SLI_REV4)
1520  ndlp->nlp_rpi = mb->un.varWords[0];
1521 
1522  ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1523 
1524  /* Only if we are not a fabric nport do we issue PRLI */
1525  if (!(ndlp->nlp_type & NLP_FABRIC)) {
1526  ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1527  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1528  lpfc_issue_els_prli(vport, ndlp, 0);
1529  } else {
1530  ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1531  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1532  }
1533  return ndlp->nlp_state;
1534 }
1535 
1536 static uint32_t
1537 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1538  struct lpfc_nodelist *ndlp,
1539  void *arg,
1540  uint32_t evt)
1541 {
1542  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1543 
1544  if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1545  spin_lock_irq(shost->host_lock);
1546  ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1547  spin_unlock_irq(shost->host_lock);
1548  return ndlp->nlp_state;
1549  } else {
1550  lpfc_drop_node(vport, ndlp);
1551  return NLP_STE_FREED_NODE;
1552  }
1553 }
1554 
1555 static uint32_t
1556 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1557  struct lpfc_nodelist *ndlp,
1558  void *arg,
1559  uint32_t evt)
1560 {
1561  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1562 
1563  /* Don't do anything that will mess up processing of the
1564  * previous RSCN.
1565  */
1566  if (vport->fc_flag & FC_RSCN_DEFERRED)
1567  return ndlp->nlp_state;
1568 
1569  ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1570  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1571  spin_lock_irq(shost->host_lock);
1572  ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1573  ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1574  spin_unlock_irq(shost->host_lock);
1575  lpfc_disc_set_adisc(vport, ndlp);
1576  return ndlp->nlp_state;
1577 }
1578 
1579 static uint32_t
1580 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1581  void *arg, uint32_t evt)
1582 {
1583  struct lpfc_iocbq *cmdiocb;
1584 
1585  cmdiocb = (struct lpfc_iocbq *) arg;
1586 
1587  lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1588  return ndlp->nlp_state;
1589 }
1590 
1591 static uint32_t
1592 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1593  void *arg, uint32_t evt)
1594 {
1595  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1596 
1597  lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1598  return ndlp->nlp_state;
1599 }
1600 
1601 static uint32_t
1602 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1603  void *arg, uint32_t evt)
1604 {
1605  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1606 
1607  /* Software abort outstanding PRLI before sending acc */
1608  lpfc_els_abort(vport->phba, ndlp);
1609 
1610  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1611  return ndlp->nlp_state;
1612 }
1613 
1614 static uint32_t
1615 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1616  void *arg, uint32_t evt)
1617 {
1618  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1619 
1620  lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1621  return ndlp->nlp_state;
1622 }
1623 
1624 /* This routine is invoked when we rcv a PRLO request from a nport
1625  * we are logged into. We should send back a PRLO rsp setting the
1626  * appropriate bits.
1627  * NEXT STATE = PRLI_ISSUE
1628  */
1629 static uint32_t
1630 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1631  void *arg, uint32_t evt)
1632 {
1633  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1634 
1635  lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1636  return ndlp->nlp_state;
1637 }
1638 
1639 static uint32_t
1640 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1641  void *arg, uint32_t evt)
1642 {
1643  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1644  struct lpfc_iocbq *cmdiocb, *rspiocb;
1645  struct lpfc_hba *phba = vport->phba;
1646  IOCB_t *irsp;
1647  PRLI *npr;
1648 
1649  cmdiocb = (struct lpfc_iocbq *) arg;
1650  rspiocb = cmdiocb->context_un.rsp_iocb;
1651  npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1652 
1653  irsp = &rspiocb->iocb;
1654  if (irsp->ulpStatus) {
1655  if ((vport->port_type == LPFC_NPIV_PORT) &&
1656  vport->cfg_restrict_login) {
1657  goto out;
1658  }
1659  ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1660  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1661  return ndlp->nlp_state;
1662  }
1663 
1664  /* Check out PRLI rsp */
1665  ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1666  ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1667  if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1668  (npr->prliType == PRLI_FCP_TYPE)) {
1669  if (npr->initiatorFunc)
1670  ndlp->nlp_type |= NLP_FCP_INITIATOR;
1671  if (npr->targetFunc)
1672  ndlp->nlp_type |= NLP_FCP_TARGET;
1673  if (npr->Retry)
1674  ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1675  }
1676  if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1677  (vport->port_type == LPFC_NPIV_PORT) &&
1678  vport->cfg_restrict_login) {
1679 out:
1680  spin_lock_irq(shost->host_lock);
1681  ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1682  spin_unlock_irq(shost->host_lock);
1683  lpfc_issue_els_logo(vport, ndlp, 0);
1684 
1685  ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1686  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1687  return ndlp->nlp_state;
1688  }
1689 
1690  ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1691  if (ndlp->nlp_type & NLP_FCP_TARGET)
1692  lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1693  else
1694  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1695  return ndlp->nlp_state;
1696 }
1697 
1716 static uint32_t
1717 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1718  void *arg, uint32_t evt)
1719 {
1720  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1721 
1722  if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1723  spin_lock_irq(shost->host_lock);
1724  ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1725  spin_unlock_irq(shost->host_lock);
1726  return ndlp->nlp_state;
1727  } else {
1728  /* software abort outstanding PLOGI */
1729  lpfc_els_abort(vport->phba, ndlp);
1730 
1731  lpfc_drop_node(vport, ndlp);
1732  return NLP_STE_FREED_NODE;
1733  }
1734 }
1735 
1736 
1753 static uint32_t
1754 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1755  struct lpfc_nodelist *ndlp,
1756  void *arg,
1757  uint32_t evt)
1758 {
1759  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1760  struct lpfc_hba *phba = vport->phba;
1761 
1762  /* Don't do anything that will mess up processing of the
1763  * previous RSCN.
1764  */
1765  if (vport->fc_flag & FC_RSCN_DEFERRED)
1766  return ndlp->nlp_state;
1767 
1768  /* software abort outstanding PRLI */
1769  lpfc_els_abort(phba, ndlp);
1770 
1771  ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1772  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1773  spin_lock_irq(shost->host_lock);
1774  ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1775  spin_unlock_irq(shost->host_lock);
1776  lpfc_disc_set_adisc(vport, ndlp);
1777  return ndlp->nlp_state;
1778 }
1779 
1780 static uint32_t
1781 lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1782  void *arg, uint32_t evt)
1783 {
1784  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1785  struct ls_rjt stat;
1786 
1787  memset(&stat, 0, sizeof(struct ls_rjt));
1788  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1789  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1790  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1791  return ndlp->nlp_state;
1792 }
1793 
1794 static uint32_t
1795 lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1796  void *arg, uint32_t evt)
1797 {
1798  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1799  struct ls_rjt stat;
1800 
1801  memset(&stat, 0, sizeof(struct ls_rjt));
1802  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1803  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1804  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1805  return ndlp->nlp_state;
1806 }
1807 
1808 static uint32_t
1809 lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1810  void *arg, uint32_t evt)
1811 {
1812  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1813  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1814 
1815  spin_lock_irq(shost->host_lock);
1816  ndlp->nlp_flag |= NLP_LOGO_ACC;
1817  spin_unlock_irq(shost->host_lock);
1818  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1819  return ndlp->nlp_state;
1820 }
1821 
1822 static uint32_t
1823 lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1824  void *arg, uint32_t evt)
1825 {
1826  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1827  struct ls_rjt stat;
1828 
1829  memset(&stat, 0, sizeof(struct ls_rjt));
1830  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1831  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1832  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1833  return ndlp->nlp_state;
1834 }
1835 
1836 static uint32_t
1837 lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1838  void *arg, uint32_t evt)
1839 {
1840  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1841  struct ls_rjt stat;
1842 
1843  memset(&stat, 0, sizeof(struct ls_rjt));
1844  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1845  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1846  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1847  return ndlp->nlp_state;
1848 }
1849 
1850 static uint32_t
1851 lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1852  void *arg, uint32_t evt)
1853 {
1854  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1855 
1856  ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1857  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1858  spin_lock_irq(shost->host_lock);
1859  ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1860  spin_unlock_irq(shost->host_lock);
1861  lpfc_disc_set_adisc(vport, ndlp);
1862  return ndlp->nlp_state;
1863 }
1864 
1865 static uint32_t
1866 lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1867  void *arg, uint32_t evt)
1868 {
1869  /*
1870  * Take no action. If a LOGO is outstanding, then possibly DevLoss has
1871  * timed out and is calling for Device Remove. In this case, the LOGO
1872  * must be allowed to complete in state LOGO_ISSUE so that the rpi
1873  * and other NLP flags are correctly cleaned up.
1874  */
1875  return ndlp->nlp_state;
1876 }
1877 
1878 static uint32_t
1879 lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1880  struct lpfc_nodelist *ndlp,
1881  void *arg, uint32_t evt)
1882 {
1883  /*
1884  * Device Recovery events have no meaning for a node with a LOGO
1885  * outstanding. The LOGO has to complete first and handle the
1886  * node from that point.
1887  */
1888  return ndlp->nlp_state;
1889 }
1890 
1891 static uint32_t
1892 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1893  void *arg, uint32_t evt)
1894 {
1895  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1896 
1897  lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1898  return ndlp->nlp_state;
1899 }
1900 
1901 static uint32_t
1902 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1903  void *arg, uint32_t evt)
1904 {
1905  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1906 
1907  lpfc_rcv_prli(vport, ndlp, cmdiocb);
1908  lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1909  return ndlp->nlp_state;
1910 }
1911 
1912 static uint32_t
1913 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1914  void *arg, uint32_t evt)
1915 {
1916  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1917 
1918  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1919  return ndlp->nlp_state;
1920 }
1921 
1922 static uint32_t
1923 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1924  void *arg, uint32_t evt)
1925 {
1926  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1927 
1928  lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1929  return ndlp->nlp_state;
1930 }
1931 
1932 static uint32_t
1933 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1934  void *arg, uint32_t evt)
1935 {
1936  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1937 
1938  lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1939  return ndlp->nlp_state;
1940 }
1941 
1942 static uint32_t
1943 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1944  struct lpfc_nodelist *ndlp,
1945  void *arg,
1946  uint32_t evt)
1947 {
1948  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1949 
1951  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1952  spin_lock_irq(shost->host_lock);
1954  spin_unlock_irq(shost->host_lock);
1955  lpfc_disc_set_adisc(vport, ndlp);
1956 
1957  return ndlp->nlp_state;
1958 }
1959 
1960 static uint32_t
1961 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1962  void *arg, uint32_t evt)
1963 {
1964  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1965 
1966  lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1967  return ndlp->nlp_state;
1968 }
1969 
1970 static uint32_t
1971 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1972  void *arg, uint32_t evt)
1973 {
1974  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1975 
1976  lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1977  return ndlp->nlp_state;
1978 }
1979 
1980 static uint32_t
1981 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1982  void *arg, uint32_t evt)
1983 {
1984  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1985 
1986  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1987  return ndlp->nlp_state;
1988 }
1989 
1990 static uint32_t
1991 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1992  struct lpfc_nodelist *ndlp,
1993  void *arg, uint32_t evt)
1994 {
1995  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1996 
1997  lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1998  return ndlp->nlp_state;
1999 }
2000 
2001 static uint32_t
2002 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2003  void *arg, uint32_t evt)
2004 {
2005  struct lpfc_hba *phba = vport->phba;
2006  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2007 
2008  /* flush the target */
2009  lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2010  ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2011 
2012  /* Treat like rcv logo */
2013  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2014  return ndlp->nlp_state;
2015 }
2016 
2017 static uint32_t
2018 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2019  struct lpfc_nodelist *ndlp,
2020  void *arg,
2021  uint32_t evt)
2022 {
2023  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2024 
2026  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2027  spin_lock_irq(shost->host_lock);
2029  spin_unlock_irq(shost->host_lock);
2030  lpfc_disc_set_adisc(vport, ndlp);
2031  return ndlp->nlp_state;
2032 }
2033 
2034 static uint32_t
2035 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2036  void *arg, uint32_t evt)
2037 {
2038  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2039  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2040 
2041  /* Ignore PLOGI if we have an outstanding LOGO */
2042  if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2043  return ndlp->nlp_state;
2044  if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2045  lpfc_cancel_retry_delay_tmo(vport, ndlp);
2046  spin_lock_irq(shost->host_lock);
2047  ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2048  spin_unlock_irq(shost->host_lock);
2049  } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2050  /* send PLOGI immediately, move to PLOGI issue state */
2051  if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2054  lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2055  }
2056  }
2057  return ndlp->nlp_state;
2058 }
2059 
2060 static uint32_t
2061 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2062  void *arg, uint32_t evt)
2063 {
2064  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2065  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2066  struct ls_rjt stat;
2067 
2068  memset(&stat, 0, sizeof (struct ls_rjt));
2069  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2070  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2071  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2072 
2073  if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2074  if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2075  spin_lock_irq(shost->host_lock);
2076  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2078  spin_unlock_irq(shost->host_lock);
2080  lpfc_issue_els_adisc(vport, ndlp, 0);
2081  } else {
2084  lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2085  }
2086  }
2087  return ndlp->nlp_state;
2088 }
2089 
2090 static uint32_t
2091 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2092  void *arg, uint32_t evt)
2093 {
2094  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2095 
2096  lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2097  return ndlp->nlp_state;
2098 }
2099 
2100 static uint32_t
2101 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2102  void *arg, uint32_t evt)
2103 {
2104  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2105 
2106  lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2107  /*
2108  * Do not start discovery if discovery is about to start
2109  * or discovery in progress for this node. Starting discovery
2110  * here will affect the counting of discovery threads.
2111  */
2112  if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2113  !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2114  if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2115  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2118  lpfc_issue_els_adisc(vport, ndlp, 0);
2119  } else {
2122  lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2123  }
2124  }
2125  return ndlp->nlp_state;
2126 }
2127 
2128 static uint32_t
2129 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2130  void *arg, uint32_t evt)
2131 {
2132  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2133  struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2134 
2135  spin_lock_irq(shost->host_lock);
2136  ndlp->nlp_flag |= NLP_LOGO_ACC;
2137  spin_unlock_irq(shost->host_lock);
2138 
2139  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2140 
2141  if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2142  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
2143  spin_lock_irq(shost->host_lock);
2144  ndlp->nlp_flag |= NLP_DELAY_TMO;
2145  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2146  spin_unlock_irq(shost->host_lock);
2148  } else {
2149  spin_lock_irq(shost->host_lock);
2150  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2151  spin_unlock_irq(shost->host_lock);
2152  }
2153  return ndlp->nlp_state;
2154 }
2155 
2156 static uint32_t
2157 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2158  void *arg, uint32_t evt)
2159 {
2160  struct lpfc_iocbq *cmdiocb, *rspiocb;
2161  IOCB_t *irsp;
2162 
2163  cmdiocb = (struct lpfc_iocbq *) arg;
2164  rspiocb = cmdiocb->context_un.rsp_iocb;
2165 
2166  irsp = &rspiocb->iocb;
2167  if (irsp->ulpStatus) {
2168  ndlp->nlp_flag |= NLP_DEFER_RM;
2169  return NLP_STE_FREED_NODE;
2170  }
2171  return ndlp->nlp_state;
2172 }
2173 
2174 static uint32_t
2175 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2176  void *arg, uint32_t evt)
2177 {
2178  struct lpfc_iocbq *cmdiocb, *rspiocb;
2179  IOCB_t *irsp;
2180 
2181  cmdiocb = (struct lpfc_iocbq *) arg;
2182  rspiocb = cmdiocb->context_un.rsp_iocb;
2183 
2184  irsp = &rspiocb->iocb;
2185  if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2186  lpfc_drop_node(vport, ndlp);
2187  return NLP_STE_FREED_NODE;
2188  }
2189  return ndlp->nlp_state;
2190 }
2191 
2192 static uint32_t
2193 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2194  void *arg, uint32_t evt)
2195 {
2196  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2197 
2198  /* For the fabric port just clear the fc flags. */
2199  if (ndlp->nlp_DID == Fabric_DID) {
2200  spin_lock_irq(shost->host_lock);
2201  vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2202  spin_unlock_irq(shost->host_lock);
2203  }
2204  lpfc_unreg_rpi(vport, ndlp);
2205  return ndlp->nlp_state;
2206 }
2207 
2208 static uint32_t
2209 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2210  void *arg, uint32_t evt)
2211 {
2212  struct lpfc_iocbq *cmdiocb, *rspiocb;
2213  IOCB_t *irsp;
2214 
2215  cmdiocb = (struct lpfc_iocbq *) arg;
2216  rspiocb = cmdiocb->context_un.rsp_iocb;
2217 
2218  irsp = &rspiocb->iocb;
2219  if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2220  lpfc_drop_node(vport, ndlp);
2221  return NLP_STE_FREED_NODE;
2222  }
2223  return ndlp->nlp_state;
2224 }
2225 
2226 static uint32_t
2227 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2228  struct lpfc_nodelist *ndlp,
2229  void *arg, uint32_t evt)
2230 {
2231  LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2232  MAILBOX_t *mb = &pmb->u.mb;
2233 
2234  if (!mb->mbxStatus) {
2235  /* SLI4 ports have preallocated logical rpis. */
2236  if (vport->phba->sli_rev < LPFC_SLI_REV4)
2237  ndlp->nlp_rpi = mb->un.varWords[0];
2238  ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2239  } else {
2240  if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2241  lpfc_drop_node(vport, ndlp);
2242  return NLP_STE_FREED_NODE;
2243  }
2244  }
2245  return ndlp->nlp_state;
2246 }
2247 
2248 static uint32_t
2249 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2250  void *arg, uint32_t evt)
2251 {
2252  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2253 
2254  if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2255  spin_lock_irq(shost->host_lock);
2256  ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2257  spin_unlock_irq(shost->host_lock);
2258  return ndlp->nlp_state;
2259  }
2260  lpfc_drop_node(vport, ndlp);
2261  return NLP_STE_FREED_NODE;
2262 }
2263 
2264 static uint32_t
2265 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2266  void *arg, uint32_t evt)
2267 {
2268  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2269 
2270  /* Don't do anything that will mess up processing of the
2271  * previous RSCN.
2272  */
2273  if (vport->fc_flag & FC_RSCN_DEFERRED)
2274  return ndlp->nlp_state;
2275 
2276  lpfc_cancel_retry_delay_tmo(vport, ndlp);
2277  spin_lock_irq(shost->host_lock);
2279  spin_unlock_irq(shost->host_lock);
2280  return ndlp->nlp_state;
2281 }
2282 
2283 
2284 /* This next section defines the NPort Discovery State Machine */
2285 
2286 /* There are 4 different doubly linked lists that nodelist entries can reside on.
2287  * The plogi list and adisc list are used when Link Up discovery or RSCN
2288  * processing is needed. Each list holds the nodes that we will send PLOGI
2289  * or ADISC on. These lists keep track of which nodes will be affected
2290  * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
2291  * The unmapped_list will contain all nodes that we have successfully logged
2292  * into at the Fibre Channel level. The mapped_list will contain all nodes
2293  * that are mapped FCP targets.
2294  */
2295 /*
2296  * The bind list is a list of undiscovered (potentially non-existent) nodes
2297  * that we have saved binding information on. This information is used when
2298  * nodes transition from the unmapped to the mapped list.
2299  */
2300 /* For UNUSED_NODE state, the node has just been allocated.
2301  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2302  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2303  * and put on the unmapped list. For ADISC processing, the node is taken off
2304  * the ADISC list and placed on either the mapped or unmapped list (depending
2305  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2306  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2307  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2308  * node, the node is taken off the unmapped list. The binding list is checked
2309  * for a valid binding, or a binding is automatically assigned. If binding
2310  * assignment is unsuccessful, the node is left on the unmapped list. If
2311  * binding assignment is successful, the associated binding list entry (if
2312  * any) is removed, and the node is placed on the mapped list.
2313  */
2314 /*
2315  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2316  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2317  * expire, all affected nodes will receive a DEVICE_RM event.
2318  */
2319 /*
2320  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2321  * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
2322  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2323  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2324  * we will first process the ADISC list. 32 entries are processed initially and
2325  * ADISC is initited for each one. Completions / Events for each node are
2326  * funnelled thru the state machine. As each node finishes ADISC processing, it
2327  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2328  * waiting, and the ADISC list count is identically 0, then we are done. For
2329  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2330  * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
2331  * list. 32 entries are processed initially and PLOGI is initiated for each one.
2332  * Completions / Events for each node are funneled through the state machine. As
2333  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2334  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2335  * identically 0, then we are done. We have now completed discovery / RSCN
2336  * handling. Upon completion, ALL nodes should be on either the mapped or
2337  * unmapped lists.
2338  */
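/*
 * Illustrative sketch only (not part of the driver source): the comment above
 * describes starting ADISC for an initial window of nodes and letting state
 * machine completions drive the rest.  A much-simplified version of such a
 * loop is shown below; the walk over vport->fc_nodes / nlp_listp and the
 * hard-coded window of 32 are assumptions made for the example, not the
 * driver's actual bookkeeping.
 */
#if 0	/* example sketch, never compiled */
static void example_start_adisc_window(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	int sent = 0;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		/* Only nodes flagged for ADISC-based revalidation. */
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			continue;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		lpfc_issue_els_adisc(vport, ndlp, 0);
		/* Start only the first 32; completions kick off the rest. */
		if (++sent == 32)
			break;
	}
}
#endif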
2339 
2340 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2341  (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2342  /* Action routine Event Current State */
2343  lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
2344  lpfc_rcv_els_unused_node, /* RCV_PRLI */
2345  lpfc_rcv_logo_unused_node, /* RCV_LOGO */
2346  lpfc_rcv_els_unused_node, /* RCV_ADISC */
2347  lpfc_rcv_els_unused_node, /* RCV_PDISC */
2348  lpfc_rcv_els_unused_node, /* RCV_PRLO */
2349  lpfc_disc_illegal, /* CMPL_PLOGI */
2350  lpfc_disc_illegal, /* CMPL_PRLI */
2351  lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
2352  lpfc_disc_illegal, /* CMPL_ADISC */
2353  lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2354  lpfc_device_rm_unused_node, /* DEVICE_RM */
2355  lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */
2356 
2357  lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
2358  lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
2359  lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
2360  lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
2361  lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
2362  lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
2363  lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
2364  lpfc_disc_illegal, /* CMPL_PRLI */
2365  lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */
2366  lpfc_disc_illegal, /* CMPL_ADISC */
2367  lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */
2368  lpfc_device_rm_plogi_issue, /* DEVICE_RM */
2369  lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
2370 
2371  lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
2372  lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
2373  lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
2374  lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
2375  lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
2376  lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
2377  lpfc_disc_illegal, /* CMPL_PLOGI */
2378  lpfc_disc_illegal, /* CMPL_PRLI */
2379  lpfc_disc_illegal, /* CMPL_LOGO */
2380  lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
2381  lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2382  lpfc_device_rm_adisc_issue, /* DEVICE_RM */
2383  lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
2384 
2385  lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
2386  lpfc_rcv_prli_reglogin_issue, /* RCV_PRLI */
2387  lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
2388  lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
2389  lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
2390  lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
2391  lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
2392  lpfc_disc_illegal, /* CMPL_PRLI */
2393  lpfc_disc_illegal, /* CMPL_LOGO */
2394  lpfc_disc_illegal, /* CMPL_ADISC */
2395  lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
2396  lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
2397  lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2398 
2399  lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
2400  lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
2401  lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
2402  lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
2403  lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
2404  lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
2405  lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
2406  lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
2407  lpfc_disc_illegal, /* CMPL_LOGO */
2408  lpfc_disc_illegal, /* CMPL_ADISC */
2409  lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2410  lpfc_device_rm_prli_issue, /* DEVICE_RM */
2411  lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
2412 
2413  lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */
2414  lpfc_rcv_prli_logo_issue, /* RCV_PRLI */
2415  lpfc_rcv_logo_logo_issue, /* RCV_LOGO */
2416  lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */
2417  lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */
2418  lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */
2419  lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
2420  lpfc_disc_illegal, /* CMPL_PRLI */
2421  lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */
2422  lpfc_disc_illegal, /* CMPL_ADISC */
2423  lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2424  lpfc_device_rm_logo_issue, /* DEVICE_RM */
2425  lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */
2426 
2427  lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
2428  lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
2429  lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
2430  lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
2431  lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
2432  lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
2433  lpfc_disc_illegal, /* CMPL_PLOGI */
2434  lpfc_disc_illegal, /* CMPL_PRLI */
2435  lpfc_disc_illegal, /* CMPL_LOGO */
2436  lpfc_disc_illegal, /* CMPL_ADISC */
2437  lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2438  lpfc_disc_illegal, /* DEVICE_RM */
2439  lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
2440 
2441  lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
2442  lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
2443  lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
2444  lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
2445  lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
2446  lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
2447  lpfc_disc_illegal, /* CMPL_PLOGI */
2448  lpfc_disc_illegal, /* CMPL_PRLI */
2449  lpfc_disc_illegal, /* CMPL_LOGO */
2450  lpfc_disc_illegal, /* CMPL_ADISC */
2451  lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2452  lpfc_disc_illegal, /* DEVICE_RM */
2453  lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
2454 
2455  lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
2456  lpfc_rcv_prli_npr_node, /* RCV_PRLI */
2457  lpfc_rcv_logo_npr_node, /* RCV_LOGO */
2458  lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
2459  lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
2460  lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
2461  lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */
2462  lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */
2463  lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
2464  lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */
2465  lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
2466  lpfc_device_rm_npr_node, /* DEVICE_RM */
2467  lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
2468 };
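/*
 * Example only (not part of the driver source): the table above is laid out
 * row-major, one row of NLP_EVT_MAX_EVENT action routines per node state, so
 * a (state, event) pair selects exactly one handler.  The sketch below mirrors
 * the indexing used by the dispatcher that follows; the NLP_STE_* state codes
 * appear in the handlers above, and NLP_EVT_RCV_PLOGI is assumed to be the
 * driver's event code for a received PLOGI (see lpfc_disc.h).
 */
#if 0	/* illustrative sketch, never compiled */
static uint32_t example_dispatch_rcv_plogi(struct lpfc_vport *vport,
					   struct lpfc_nodelist *ndlp,
					   void *arg)
{
	uint32_t (*action)(struct lpfc_vport *, struct lpfc_nodelist *,
			   void *, uint32_t);

	/* Row = current node state, column = event being delivered. */
	action = lpfc_disc_action[(NLP_STE_NPR_NODE * NLP_EVT_MAX_EVENT) +
				  NLP_EVT_RCV_PLOGI];
	/* For this state/event pair the table selects lpfc_rcv_plogi_npr_node. */
	return action(vport, ndlp, arg, NLP_EVT_RCV_PLOGI);
}
#endif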
2469 
2470 int
2471 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2472  void *arg, uint32_t evt)
2473 {
2474  uint32_t cur_state, rc;
2475  uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2476  uint32_t);
2477  uint32_t got_ndlp = 0;
2478 
2479  if (lpfc_nlp_get(ndlp))
2480  got_ndlp = 1;
2481 
2482  cur_state = ndlp->nlp_state;
2483 
2484  /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2486  "0211 DSM in event x%x on NPort x%x in "
2487  "state %d Data: x%x\n",
2488  evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2489 
2491  "DSM in: evt:%d ste:%d did:x%x",
2492  evt, cur_state, ndlp->nlp_DID);
2493 
2494  func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2495  rc = (func) (vport, ndlp, arg, evt);
2496 
2497  /* DSM out state <rc> on NPort <nlp_DID> */
2498  if (got_ndlp) {
2500  "0212 DSM out state %d on NPort x%x Data: x%x\n",
2501  rc, ndlp->nlp_DID, ndlp->nlp_flag);
2502 
2504  "DSM out: ste:%d did:x%x flg:x%x",
2505  rc, ndlp->nlp_DID, ndlp->nlp_flag);
2506  /* Decrement the ndlp reference count held for this function */
2507  lpfc_nlp_put(ndlp);
2508  } else {
2510  "0213 DSM out state %d on NPort free\n", rc);
2511 
2513  "DSM out: ste:%d did:x%x flg:x%x",
2514  rc, 0, 0);
2515  }
2516 
2517  return rc;
2518 }
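/*
 * Usage sketch: callers elsewhere in the driver (the ELS receive and
 * completion paths, primarily in lpfc_els.c) deliver events to this state
 * machine rather than acting on nodes directly, with calls along the lines of
 *
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * on receipt of an unsolicited PLOGI.  The table above then routes the event
 * to the handler for the node's current state.  The exact call sites are not
 * shown here.
 */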