Linux Kernel  3.7.1
lpfc_els.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for *
3  * Fibre Channel Host Bus Adapters. *
4  * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8  * *
9  * This program is free software; you can redistribute it and/or *
10  * modify it under the terms of version 2 of the GNU General *
11  * Public License as published by the Free Software Foundation. *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID. See the GNU General Public License for *
18  * more details, a copy of which can be found in the file COPYING *
19  * included with this package. *
20  *******************************************************************/
21 /* See Fibre Channel protocol T11 FC-LS for details */
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
46  struct lpfc_iocbq *);
47 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
48  struct lpfc_iocbq *);
49 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
50 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
51  struct lpfc_nodelist *ndlp, uint8_t retry);
52 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
53  struct lpfc_iocbq *iocb);
54 
55 static int lpfc_max_els_tries = 3;
56 
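/*
 * lpfc_els_chk_latt - Check whether a link attention event is pending.
 * Reads the Host Attention register and, if a link event (HA_LATT) is
 * posted while discovery is still in progress, marks the vport with
 * FC_ABORT_DISCOVERY and issues CLEAR_LA so the event can be taken.
 * Returns 1 if a link event is pending, otherwise 0.
 */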
79 int
80 lpfc_els_chk_latt(struct lpfc_vport *vport)
81 {
82  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
83  struct lpfc_hba *phba = vport->phba;
84  uint32_t ha_copy;
85 
86  if (vport->port_state >= LPFC_VPORT_READY ||
87  phba->link_state == LPFC_LINK_DOWN ||
88  phba->sli_rev > LPFC_SLI_REV3)
89  return 0;
90 
91  /* Read the HBA Host Attention Register */
92  if (lpfc_readl(phba->HAregaddr, &ha_copy))
93  return 1;
94 
95  if (!(ha_copy & HA_LATT))
96  return 0;
97 
98  /* Pending Link Event during Discovery */
99  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
100  "0237 Pending Link Event during "
101  "Discovery: State x%x\n",
102  phba->pport->port_state);
103 
104  /* CLEAR_LA should re-enable link attention events and
105  * we should then immediately take a LATT event. The
106  * LATT processing should call lpfc_linkdown() which
107  * will cleanup any left over in-progress discovery
108  * events.
109  */
110  spin_lock_irq(shost->host_lock);
111  vport->fc_flag |= FC_ABORT_DISCOVERY;
112  spin_unlock_irq(shost->host_lock);
113 
114  if (phba->link_state != LPFC_CLEAR_LA)
115  lpfc_issue_clear_la(phba, vport);
116 
117  return 1;
118 }
119 
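/*
 * lpfc_prep_els_iocb - Allocate and prepare an ELS command/response iocb.
 * Allocates the iocb plus DMA buffers for the command payload, the
 * optional response payload (expectRsp) and the buffer pointer list,
 * fills in the BDEs, FIP flags, VPI/context fields and timeouts, and
 * takes a reference on the ndlp. Returns the iocb or NULL on failure.
 */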
148 struct lpfc_iocbq *
149 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
150  uint16_t cmdSize, uint8_t retry,
151  struct lpfc_nodelist *ndlp, uint32_t did,
152  uint32_t elscmd)
153 {
154  struct lpfc_hba *phba = vport->phba;
155  struct lpfc_iocbq *elsiocb;
156  struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
157  struct ulp_bde64 *bpl;
158  IOCB_t *icmd;
159 
160 
161  if (!lpfc_is_link_up(phba))
162  return NULL;
163 
164  /* Allocate buffer for command iocb */
165  elsiocb = lpfc_sli_get_iocbq(phba);
166 
167  if (elsiocb == NULL)
168  return NULL;
169 
170  /*
171  * If this command is for fabric controller and HBA running
172  * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
173  */
174  if ((did == Fabric_DID) &&
175  (phba->hba_flag & HBA_FIP_SUPPORT) &&
176  ((elscmd == ELS_CMD_FLOGI) ||
177  (elscmd == ELS_CMD_FDISC) ||
178  (elscmd == ELS_CMD_LOGO)))
179  switch (elscmd) {
180  case ELS_CMD_FLOGI:
181  elsiocb->iocb_flag |=
182  ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
183  & LPFC_FIP_ELS_ID_MASK);
184  break;
185  case ELS_CMD_FDISC:
186  elsiocb->iocb_flag |=
187  ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
188  & LPFC_FIP_ELS_ID_MASK);
189  break;
190  case ELS_CMD_LOGO:
191  elsiocb->iocb_flag |=
192  ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
193  & LPFC_FIP_ELS_ID_MASK);
194  break;
195  }
196  else
197  elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
198 
199  icmd = &elsiocb->iocb;
200 
201  /* fill in BDEs for command */
202  /* Allocate buffer for command payload */
203  pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
204  if (pcmd)
205  pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
206  if (!pcmd || !pcmd->virt)
207  goto els_iocb_free_pcmb_exit;
208 
209  INIT_LIST_HEAD(&pcmd->list);
210 
211  /* Allocate buffer for response payload */
212  if (expectRsp) {
213  prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
214  if (prsp)
215  prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
216  &prsp->phys);
217  if (!prsp || !prsp->virt)
218  goto els_iocb_free_prsp_exit;
219  INIT_LIST_HEAD(&prsp->list);
220  } else
221  prsp = NULL;
222 
223  /* Allocate buffer for Buffer ptr list */
224  pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
225  if (pbuflist)
226  pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
227  &pbuflist->phys);
228  if (!pbuflist || !pbuflist->virt)
229  goto els_iocb_free_pbuf_exit;
230 
231  INIT_LIST_HEAD(&pbuflist->list);
232 
233  if (expectRsp) {
234  icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
235  icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
236  icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
237  icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
238 
239  icmd->un.elsreq64.remoteID = did; /* DID */
240  icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
241  icmd->ulpTimeout = phba->fc_ratov * 2;
242  } else {
243  icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
244  icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
245  icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
246  icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
247  icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
248  icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
249  }
250  icmd->ulpBdeCount = 1;
251  icmd->ulpLe = 1;
252  icmd->ulpClass = CLASS3;
253 
254  /*
255  * If we have NPIV enabled, we want to send ELS traffic by VPI.
256  * For SLI4, since the driver controls VPIs we also want to include
257  * all ELS pt2pt protocol traffic as well.
258  */
259  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
260  ((phba->sli_rev == LPFC_SLI_REV4) &&
261  (vport->fc_flag & FC_PT2PT))) {
262 
263  if (expectRsp) {
264  icmd->un.elsreq64.myID = vport->fc_myDID;
265 
266  /* For ELS_REQUEST64_CR, use the VPI by default */
267  icmd->ulpContext = phba->vpi_ids[vport->vpi];
268  }
269 
270  icmd->ulpCt_h = 0;
271  /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
272  if (elscmd == ELS_CMD_ECHO)
273  icmd->ulpCt_l = 0; /* context = invalid RPI */
274  else
275  icmd->ulpCt_l = 1; /* context = VPI */
276  }
277 
278  bpl = (struct ulp_bde64 *) pbuflist->virt;
279  bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
280  bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
281  bpl->tus.f.bdeSize = cmdSize;
282  bpl->tus.f.bdeFlags = 0;
283  bpl->tus.w = le32_to_cpu(bpl->tus.w);
284 
285  if (expectRsp) {
286  bpl++;
287  bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
288  bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
289  bpl->tus.f.bdeSize = FCELSSIZE;
290  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
291  bpl->tus.w = le32_to_cpu(bpl->tus.w);
292  }
293 
294  /* prevent preparing iocb with NULL ndlp reference */
295  elsiocb->context1 = lpfc_nlp_get(ndlp);
296  if (!elsiocb->context1)
297  goto els_iocb_free_pbuf_exit;
298  elsiocb->context2 = pcmd;
299  elsiocb->context3 = pbuflist;
300  elsiocb->retry = retry;
301  elsiocb->vport = vport;
302  elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
303 
304  if (prsp) {
305  list_add(&prsp->list, &pcmd->list);
306  }
307  if (expectRsp) {
308  /* Xmit ELS command <elsCmd> to remote NPORT <did> */
309  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
310  "0116 Xmit ELS command x%x to remote "
311  "NPORT x%x I/O tag: x%x, port state: x%x\n",
312  elscmd, did, elsiocb->iotag,
313  vport->port_state);
314  } else {
315  /* Xmit ELS response <elsCmd> to remote NPORT <did> */
316  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
317  "0117 Xmit ELS response x%x to remote "
318  "NPORT x%x I/O tag: x%x, size: x%x\n",
319  elscmd, ndlp->nlp_DID, elsiocb->iotag,
320  cmdSize);
321  }
322  return elsiocb;
323 
324 els_iocb_free_pbuf_exit:
325  if (expectRsp)
326  lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
327  kfree(pbuflist);
328 
329 els_iocb_free_prsp_exit:
330  lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
331  kfree(prsp);
332 
333 els_iocb_free_pcmb_exit:
334  kfree(pcmd);
335  lpfc_sli_release_iocbq(phba, elsiocb);
336  return NULL;
337 }
338 
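/*
 * Register the fabric login: issue CONFIG_LINK followed by REG_RPI for
 * the Fabric_DID node using the fabric service parameters, holding an
 * ndlp reference for the mailbox completion. Returns 0 on success or
 * -ENXIO on failure (message 0249 reports the failing step).
 */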
355 int
356 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
357 {
358  struct lpfc_hba *phba = vport->phba;
359  LPFC_MBOXQ_t *mbox;
360  struct lpfc_dmabuf *mp;
361  struct lpfc_nodelist *ndlp;
362  struct serv_parm *sp;
363  int rc;
364  int err = 0;
365 
366  sp = &phba->fc_fabparam;
367  ndlp = lpfc_findnode_did(vport, Fabric_DID);
368  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
369  err = 1;
370  goto fail;
371  }
372 
373  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
374  if (!mbox) {
375  err = 2;
376  goto fail;
377  }
378 
379  vport->port_state = LPFC_FABRIC_CFG_LINK;
380  lpfc_config_link(phba, mbox);
381  mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
382  mbox->vport = vport;
383 
384  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
385  if (rc == MBX_NOT_FINISHED) {
386  err = 3;
387  goto fail_free_mbox;
388  }
389 
390  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
391  if (!mbox) {
392  err = 4;
393  goto fail;
394  }
395  rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
396  ndlp->nlp_rpi);
397  if (rc) {
398  err = 5;
399  goto fail_free_mbox;
400  }
401 
403  mbox->vport = vport;
404  /* increment the reference count on ndlp to hold reference
405  * for the callback routine.
406  */
407  mbox->context2 = lpfc_nlp_get(ndlp);
408 
409  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
410  if (rc == MBX_NOT_FINISHED) {
411  err = 6;
412  goto fail_issue_reg_login;
413  }
414 
415  return 0;
416 
417 fail_issue_reg_login:
418  /* decrement the reference count on ndlp just incremented
419  * for the failed mbox command.
420  */
421  lpfc_nlp_put(ndlp);
422  mp = (struct lpfc_dmabuf *) mbox->context1;
423  lpfc_mbuf_free(phba, mp->virt, mp->phys);
424  kfree(mp);
425 fail_free_mbox:
426  mempool_free(mbox, phba->mbox_mem_pool);
427 
428 fail:
429  lpfc_printf_vlog(vport, KERN_ERR,
430  LOG_ELS,
431  "0249 Cannot issue Register Fabric login: Err %d\n", err);
432  return -ENXIO;
433 }
434 
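/*
 * Issue a REG_VFI mailbox command for this vport. The fabric service
 * parameters are copied into a DMA buffer referenced by the mailbox;
 * on any allocation or submission failure the resources are unwound
 * and message 0289 is logged.
 */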
446 int
447 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
448 {
449  struct lpfc_hba *phba = vport->phba;
450  LPFC_MBOXQ_t *mboxq;
451  struct lpfc_nodelist *ndlp;
452  struct serv_parm *sp;
453  struct lpfc_dmabuf *dmabuf;
454  int rc = 0;
455 
456  sp = &phba->fc_fabparam;
457  /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
458  if ((phba->sli_rev == LPFC_SLI_REV4) &&
459  !(phba->link_flag & LS_LOOPBACK_MODE) &&
460  !(vport->fc_flag & FC_PT2PT)) {
461  ndlp = lpfc_findnode_did(vport, Fabric_DID);
462  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
463  rc = -ENODEV;
464  goto fail;
465  }
466  }
467 
468  dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
469  if (!dmabuf) {
470  rc = -ENOMEM;
471  goto fail;
472  }
473  dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
474  if (!dmabuf->virt) {
475  rc = -ENOMEM;
476  goto fail_free_dmabuf;
477  }
478 
479  mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
480  if (!mboxq) {
481  rc = -ENOMEM;
482  goto fail_free_coherent;
483  }
485  memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
486  lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
488  mboxq->vport = vport;
489  mboxq->context1 = dmabuf;
490  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
491  if (rc == MBX_NOT_FINISHED) {
492  rc = -ENXIO;
493  goto fail_free_mbox;
494  }
495  return 0;
496 
497 fail_free_mbox:
498  mempool_free(mboxq, phba->mbox_mem_pool);
499 fail_free_coherent:
500  lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
501 fail_free_dmabuf:
502  kfree(dmabuf);
503 fail:
504  lpfc_printf_vlog(vport, KERN_ERR,
505  LOG_ELS,
506  "0289 Issue Register VFI failed: Err %d\n", rc);
507  return rc;
508 }
509 
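/*
 * Issue an UNREG_VFI mailbox command and, once submitted, clear the
 * FC_VFI_REGISTERED flag on the vport. Returns 0, -ENOMEM or -EIO.
 */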
521 int
522 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
523 {
524  struct lpfc_hba *phba = vport->phba;
525  struct Scsi_Host *shost;
526  LPFC_MBOXQ_t *mboxq;
527  int rc;
528 
529  mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
530  if (!mboxq) {
531  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
532  "2556 UNREG_VFI mbox allocation failed "
533  "HBA state x%x\n", phba->pport->port_state);
534  return -ENOMEM;
535  }
536 
537  lpfc_unreg_vfi(mboxq, vport);
538  mboxq->vport = vport;
540 
541  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
542  if (rc == MBX_NOT_FINISHED) {
543  lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
544  "2557 UNREG_VFI issue mbox failed rc x%x "
545  "HBA state x%x\n",
546  rc, phba->pport->port_state);
547  mempool_free(mboxq, phba->mbox_mem_pool);
548  return -EIO;
549  }
550 
551  shost = lpfc_shost_from_vport(vport);
552  spin_lock_irq(shost->host_lock);
553  vport->fc_flag &= ~FC_VFI_REGISTERED;
554  spin_unlock_irq(shost->host_lock);
555  return 0;
556 }
557 
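/*
 * Compare the fabric port/node names and our previous DID with the
 * FLOGI response. If the fabric parameters changed and the clean
 * address bit is not set, delay NPort discovery (FC_DISC_DELAYED).
 * Returns 1 if the fabric parameters changed, otherwise 0.
 */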
576 static uint8_t
577 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
578  struct serv_parm *sp)
579 {
580  uint8_t fabric_param_changed = 0;
581  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
582 
583  if ((vport->fc_prevDID != vport->fc_myDID) ||
584  memcmp(&vport->fabric_portname, &sp->portName,
585  sizeof(struct lpfc_name)) ||
586  memcmp(&vport->fabric_nodename, &sp->nodeName,
587  sizeof(struct lpfc_name)))
588  fabric_param_changed = 1;
589 
590  /*
591  * Word 1 Bit 31 in common service parameter is overloaded.
592  * Word 1 Bit 31 in FLOGI request is multiple NPort request
593  * Word 1 Bit 31 in FLOGI response is clean address bit
594  *
595  * If fabric parameter is changed and clean address bit is
596  * cleared delay nport discovery if
597  * - vport->fc_prevDID != 0 (not initial discovery) OR
598  * - lpfc_delay_discovery module parameter is set.
599  */
600  if (fabric_param_changed && !sp->cmn.clean_address_bit &&
601  (vport->fc_prevDID || lpfc_delay_discovery)) {
602  spin_lock_irq(shost->host_lock);
603  vport->fc_flag |= FC_DISC_DELAYED;
604  spin_unlock_irq(shost->host_lock);
605  }
606 
607  return fabric_param_changed;
608 }
609 
610 
631 static int
632 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
633  struct serv_parm *sp, IOCB_t *irsp)
634 {
635  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
636  struct lpfc_hba *phba = vport->phba;
637  struct lpfc_nodelist *np;
638  struct lpfc_nodelist *next_np;
639  uint8_t fabric_param_changed;
640 
641  spin_lock_irq(shost->host_lock);
642  vport->fc_flag |= FC_FABRIC;
643  spin_unlock_irq(shost->host_lock);
644 
645  phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
646  if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
647  phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
648 
649  phba->fc_edtovResol = sp->cmn.edtovResolution;
650  phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
651 
652  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
653  spin_lock_irq(shost->host_lock);
654  vport->fc_flag |= FC_PUBLIC_LOOP;
655  spin_unlock_irq(shost->host_lock);
656  }
657 
658  vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
659  memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
660  memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
661  ndlp->nlp_class_sup = 0;
662  if (sp->cls1.classValid)
663  ndlp->nlp_class_sup |= FC_COS_CLASS1;
664  if (sp->cls2.classValid)
665  ndlp->nlp_class_sup |= FC_COS_CLASS2;
666  if (sp->cls3.classValid)
667  ndlp->nlp_class_sup |= FC_COS_CLASS3;
668  if (sp->cls4.classValid)
669  ndlp->nlp_class_sup |= FC_COS_CLASS4;
670  ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
671  sp->cmn.bbRcvSizeLsb;
672 
673  fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
674  memcpy(&vport->fabric_portname, &sp->portName,
675  sizeof(struct lpfc_name));
676  memcpy(&vport->fabric_nodename, &sp->nodeName,
677  sizeof(struct lpfc_name));
678  memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
679 
680  if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
681  if (sp->cmn.response_multiple_NPort) {
682  lpfc_printf_vlog(vport, KERN_WARNING,
683  LOG_ELS | LOG_VPORT,
684  "1816 FLOGI NPIV supported, "
685  "response data 0x%x\n",
686  sp->cmn.response_multiple_NPort);
687  spin_lock_irq(&phba->hbalock);
688  phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
689  spin_unlock_irq(&phba->hbalock);
690  } else {
691  /* Because we asked f/w for NPIV it still expects us
692  to call reg_vnpid at least for the physical host */
693  lpfc_printf_vlog(vport, KERN_WARNING,
694  LOG_ELS | LOG_VPORT,
695  "1817 Fabric does not support NPIV "
696  "- configuring single port mode.\n");
697  spin_lock_irq(&phba->hbalock);
698  phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
699  spin_unlock_irq(&phba->hbalock);
700  }
701  }
702 
703  if (fabric_param_changed &&
704  !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
705 
706  /* If our NportID changed, we need to ensure all
707  * remaining NPORTs get unreg_login'ed.
708  */
709  list_for_each_entry_safe(np, next_np,
710  &vport->fc_nodes, nlp_listp) {
711  if (!NLP_CHK_NODE_ACT(np))
712  continue;
713  if ((np->nlp_state != NLP_STE_NPR_NODE) ||
714  !(np->nlp_flag & NLP_NPR_ADISC))
715  continue;
716  spin_lock_irq(shost->host_lock);
717  np->nlp_flag &= ~NLP_NPR_ADISC;
718  spin_unlock_irq(shost->host_lock);
719  lpfc_unreg_rpi(vport, np);
720  }
722 
723  if (phba->sli_rev == LPFC_SLI_REV4) {
724  lpfc_sli4_unreg_all_rpis(vport);
725  lpfc_mbx_unreg_vpi(vport);
726  spin_lock_irq(shost->host_lock);
727  vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
728  spin_unlock_irq(shost->host_lock);
729  }
730 
731  /*
732  * For SLI3 and SLI4, the VPI needs to be reregistered in
733  * response to this fabric parameter change event.
734  */
735  spin_lock_irq(shost->host_lock);
736  vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
737  spin_unlock_irq(shost->host_lock);
738  } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
739  !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
740  /*
741  * Driver needs to re-reg VPI in order for f/w
742  * to update the MAC address.
743  */
745  lpfc_register_new_vport(phba, vport, ndlp);
746  return 0;
747  }
748 
749  if (phba->sli_rev < LPFC_SLI_REV4) {
750  lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
751  if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
752  vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
753  lpfc_register_new_vport(phba, vport, ndlp);
754  else
755  lpfc_issue_fabric_reglogin(vport);
756  } else {
757  ndlp->nlp_type |= NLP_FABRIC;
758  lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
759  if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
760  (vport->vpi_state & LPFC_VPI_REGISTERED)) {
761  lpfc_start_fdiscs(phba);
762  lpfc_do_scr_ns_plogi(phba, vport);
763  } else if (vport->fc_flag & FC_VFI_REGISTERED)
764  lpfc_issue_init_vpi(vport);
765  else {
766  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
767  "3135 Need register VFI: (x%x/%x)\n",
768  vport->fc_prevDID, vport->fc_myDID);
769  lpfc_issue_reg_vfi(vport);
770  }
771  }
772  return 0;
773 }
774 
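/*
 * Handle a FLOGI completion in point-to-point mode: compare WWPNs to
 * decide which side sends PLOGI, issue CONFIG_LINK (and REG_VFI on
 * SLI4), set up the remote ndlp and start discovery with FC_PT2PT set.
 */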
795 static int
796 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
797  struct serv_parm *sp)
798 {
799  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
800  struct lpfc_hba *phba = vport->phba;
801  LPFC_MBOXQ_t *mbox;
802  int rc;
803 
804  spin_lock_irq(shost->host_lock);
805  vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
806  spin_unlock_irq(shost->host_lock);
807 
808  phba->fc_edtov = FF_DEF_EDTOV;
809  phba->fc_ratov = FF_DEF_RATOV;
810  rc = memcmp(&vport->fc_portname, &sp->portName,
811  sizeof(vport->fc_portname));
812  memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
813 
814  if (rc >= 0) {
815  /* This side will initiate the PLOGI */
816  spin_lock_irq(shost->host_lock);
817  vport->fc_flag |= FC_PT2PT_PLOGI;
818  spin_unlock_irq(shost->host_lock);
819 
820  /*
821  * N_Port ID cannot be 0; set ours to LocalID, the other
822  * side will be RemoteID.
823  */
824 
825  /* not equal */
826  if (rc)
827  vport->fc_myDID = PT2PT_LocalID;
828 
829  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
830  if (!mbox)
831  goto fail;
832 
833  lpfc_config_link(phba, mbox);
834 
836  mbox->vport = vport;
837  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
838  if (rc == MBX_NOT_FINISHED) {
839  mempool_free(mbox, phba->mbox_mem_pool);
840  goto fail;
841  }
842 
843  /*
844  * For SLI4, the VFI/VPI are registered AFTER the
845  * Nport with the higher WWPN sends the PLOGI with
846  * an assigned NPortId.
847  */
848 
849  /* not equal */
850  if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
851  lpfc_issue_reg_vfi(vport);
852 
853  /* Decrement ndlp reference count indicating that ndlp can be
854  * safely released when other references to it are done.
855  */
856  lpfc_nlp_put(ndlp);
857 
858  ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
859  if (!ndlp) {
860  /*
861  * Cannot find existing Fabric ndlp, so allocate a
862  * new one
863  */
864  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
865  if (!ndlp)
866  goto fail;
867  lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
868  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
869  ndlp = lpfc_enable_node(vport, ndlp,
870  NLP_STE_UNUSED_NODE);
871  if (!ndlp)
872  goto fail;
873  }
874 
875  memcpy(&ndlp->nlp_portname, &sp->portName,
876  sizeof(struct lpfc_name));
877  memcpy(&ndlp->nlp_nodename, &sp->nodeName,
878  sizeof(struct lpfc_name));
879  /* Set state will put ndlp onto node list if not already done */
880  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
881  spin_lock_irq(shost->host_lock);
882  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
883  spin_unlock_irq(shost->host_lock);
884  } else
885  /* This side will wait for the PLOGI, decrement ndlp reference
886  * count indicating that ndlp can be released when other
887  * references to it are done.
888  */
889  lpfc_nlp_put(ndlp);
890 
891  /* If we are pt2pt with another NPort, force NPIV off! */
892  phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
893 
894  spin_lock_irq(shost->host_lock);
895  vport->fc_flag |= FC_PT2PT;
896  spin_unlock_irq(shost->host_lock);
897 
898  /* Start discovery - this should just do CLEAR_LA */
899  lpfc_disc_start(vport);
900  return 0;
901 fail:
902  return -ENXIO;
903 }
904 
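/*
 * FLOGI completion handler. On error it performs roundrobin FCF
 * failover in FIP mode, retries the ELS command, or falls back to
 * loop-map discovery; on success it dispatches to the fabric or
 * point-to-point completion path based on the F_Port bit.
 */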
928 static void
929 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
930  struct lpfc_iocbq *rspiocb)
931 {
932  struct lpfc_vport *vport = cmdiocb->vport;
933  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
934  IOCB_t *irsp = &rspiocb->iocb;
935  struct lpfc_nodelist *ndlp = cmdiocb->context1;
936  struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
937  struct serv_parm *sp;
938  uint16_t fcf_index;
939  int rc;
940 
941  /* Check to see if link went down during discovery */
942  if (lpfc_els_chk_latt(vport)) {
943  /* One additional decrement on node reference count to
944  * trigger the release of the node
945  */
946  lpfc_nlp_put(ndlp);
947  goto out;
948  }
949 
951  "FLOGI cmpl: status:x%x/x%x state:x%x",
952  irsp->ulpStatus, irsp->un.ulpWord[4],
953  vport->port_state);
954 
955  if (irsp->ulpStatus) {
956  /*
957  * In case of FIP mode, perform roundrobin FCF failover
958  * due to new FCF discovery
959  */
960  if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
961  (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
962  if (phba->link_state < LPFC_LINK_UP)
963  goto stop_rr_fcf_flogi;
964  if ((phba->fcoe_cvl_eventtag_attn ==
965  phba->fcoe_cvl_eventtag) &&
966  (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
967  ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
968  IOERR_SLI_ABORTED))
969  goto stop_rr_fcf_flogi;
970  else
971  phba->fcoe_cvl_eventtag_attn =
972  phba->fcoe_cvl_eventtag;
974  "2611 FLOGI failed on FCF (x%x), "
975  "status:x%x/x%x, tmo:x%x, perform "
976  "roundrobin FCF failover\n",
977  phba->fcf.current_rec.fcf_indx,
978  irsp->ulpStatus, irsp->un.ulpWord[4],
979  irsp->ulpTimeout);
981  phba->fcf.current_rec.fcf_indx);
982  fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
983  rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
984  if (rc)
985  goto out;
986  }
987 
988 stop_rr_fcf_flogi:
989  /* FLOGI failure */
991  "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
992  irsp->ulpStatus, irsp->un.ulpWord[4],
993  irsp->ulpTimeout);
994 
995  /* Check for retry */
996  if (lpfc_els_retry(phba, cmdiocb, rspiocb))
997  goto out;
998 
999  /* FLOGI failure */
1000  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1001  "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1002  irsp->ulpStatus, irsp->un.ulpWord[4],
1003  irsp->ulpTimeout);
1004 
1005  /* FLOGI failed, so there is no fabric */
1006  spin_lock_irq(shost->host_lock);
1007  vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1008  spin_unlock_irq(shost->host_lock);
1009 
1010  /* If private loop, then allow max outstanding els to be
1011  * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1012  * alpa map would take too long otherwise.
1013  */
1014  if (phba->alpa_map[0] == 0)
1015  vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1016  if ((phba->sli_rev == LPFC_SLI_REV4) &&
1017  (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1018  (vport->fc_prevDID != vport->fc_myDID))) {
1019  if (vport->fc_flag & FC_VFI_REGISTERED)
1020  lpfc_sli4_unreg_all_rpis(vport);
1021  lpfc_issue_reg_vfi(vport);
1022  lpfc_nlp_put(ndlp);
1023  goto out;
1024  }
1025  goto flogifail;
1026  }
1027  spin_lock_irq(shost->host_lock);
1028  vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1029  vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1030  spin_unlock_irq(shost->host_lock);
1031 
1032  /*
1033  * The FLogI succeeded. Sync the data for the CPU before
1034  * accessing it.
1035  */
1036  prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1037 
1038  sp = prsp->virt + sizeof(uint32_t);
1039 
1040  /* FLOGI completes successfully */
1041  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1042  "0101 FLOGI completes successfully "
1043  "Data: x%x x%x x%x x%x\n",
1044  irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1045  sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
1046 
1047  if (vport->port_state == LPFC_FLOGI) {
1048  /*
1049  * If Common Service Parameters indicate Nport
1050  * we are point to point, if Fport we are Fabric.
1051  */
1052  if (sp->cmn.fPort)
1053  rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1054  else if (!(phba->hba_flag & HBA_FCOE_MODE))
1055  rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1056  else {
1057  lpfc_printf_vlog(vport, KERN_ERR,
1058  LOG_FIP | LOG_ELS,
1059  "2831 FLOGI response with cleared Fabric "
1060  "bit fcf_index 0x%x "
1061  "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1062  "Fabric Name "
1063  "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1064  phba->fcf.current_rec.fcf_indx,
1065  phba->fcf.current_rec.switch_name[0],
1066  phba->fcf.current_rec.switch_name[1],
1067  phba->fcf.current_rec.switch_name[2],
1068  phba->fcf.current_rec.switch_name[3],
1069  phba->fcf.current_rec.switch_name[4],
1070  phba->fcf.current_rec.switch_name[5],
1071  phba->fcf.current_rec.switch_name[6],
1072  phba->fcf.current_rec.switch_name[7],
1073  phba->fcf.current_rec.fabric_name[0],
1074  phba->fcf.current_rec.fabric_name[1],
1075  phba->fcf.current_rec.fabric_name[2],
1076  phba->fcf.current_rec.fabric_name[3],
1077  phba->fcf.current_rec.fabric_name[4],
1078  phba->fcf.current_rec.fabric_name[5],
1079  phba->fcf.current_rec.fabric_name[6],
1080  phba->fcf.current_rec.fabric_name[7]);
1081  lpfc_nlp_put(ndlp);
1082  spin_lock_irq(&phba->hbalock);
1083  phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1084  phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1085  spin_unlock_irq(&phba->hbalock);
1086  goto out;
1087  }
1088  if (!rc) {
1089  /* Mark the FCF discovery process done */
1090  if (phba->hba_flag & HBA_FIP_SUPPORT)
1091  lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1092  LOG_ELS,
1093  "2769 FLOGI to FCF (x%x) "
1094  "completed successfully\n",
1095  phba->fcf.current_rec.fcf_indx);
1096  spin_lock_irq(&phba->hbalock);
1097  phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1098  phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1099  spin_unlock_irq(&phba->hbalock);
1100  goto out;
1101  }
1102  }
1103 
1104 flogifail:
1105  lpfc_nlp_put(ndlp);
1106 
1107  if (!lpfc_error_lost_link(irsp)) {
1108  /* FLOGI failed, so just use loop map to make discovery list */
1109  lpfc_disc_list_loopmap(vport);
1110 
1111  /* Start discovery */
1112  lpfc_disc_start(vport);
1113  } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1114  (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1115  IOERR_SLI_ABORTED) &&
1116  ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1117  IOERR_SLI_DOWN))) &&
1118  (phba->link_state != LPFC_CLEAR_LA)) {
1119  /* If FLOGI failed enable link interrupt. */
1120  lpfc_issue_clear_la(phba, vport);
1121  }
1122 out:
1123  lpfc_els_free_iocb(phba, cmdiocb);
1124 }
1125 
1148 static int
1149 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1150  uint8_t retry)
1151 {
1152  struct lpfc_hba *phba = vport->phba;
1153  struct serv_parm *sp;
1154  IOCB_t *icmd;
1155  struct lpfc_iocbq *elsiocb;
1156  struct lpfc_sli_ring *pring;
1157  uint8_t *pcmd;
1158  uint16_t cmdsize;
1159  uint32_t tmo;
1160  int rc;
1161 
1162  pring = &phba->sli.ring[LPFC_ELS_RING];
1163 
1164  cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1165  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1166  ndlp->nlp_DID, ELS_CMD_FLOGI);
1167 
1168  if (!elsiocb)
1169  return 1;
1170 
1171  icmd = &elsiocb->iocb;
1172  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1173 
1174  /* For FLOGI request, remainder of payload is service parameters */
1175  *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1176  pcmd += sizeof(uint32_t);
1177  memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1178  sp = (struct serv_parm *) pcmd;
1179 
1180  /* Setup CSPs accordingly for Fabric */
1181  sp->cmn.e_d_tov = 0;
1182  sp->cmn.w2.r_a_tov = 0;
1183  sp->cmn.virtual_fabric_support = 0;
1184  sp->cls1.classValid = 0;
1185  sp->cls2.seqDelivery = 1;
1186  sp->cls3.seqDelivery = 1;
1187  if (sp->cmn.fcphLow < FC_PH3)
1188  sp->cmn.fcphLow = FC_PH3;
1189  if (sp->cmn.fcphHigh < FC_PH3)
1190  sp->cmn.fcphHigh = FC_PH3;
1191 
1192  if (phba->sli_rev == LPFC_SLI_REV4) {
1193  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1194  LPFC_SLI_INTF_IF_TYPE_0) {
1195  elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1196  elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1197  /* FLOGI needs to be 3 for WQE FCFI */
1198  /* Set the fcfi to the fcfi we registered with */
1199  elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1200  }
1201  } else {
1202  if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1203  sp->cmn.request_multiple_Nport = 1;
1204  /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1205  icmd->ulpCt_h = 1;
1206  icmd->ulpCt_l = 0;
1207  } else
1208  sp->cmn.request_multiple_Nport = 0;
1209  }
1210 
1211  if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1212  icmd->un.elsreq64.myID = 0;
1213  icmd->un.elsreq64.fl = 1;
1214  }
1215 
1216  tmo = phba->fc_ratov;
1217  phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1218  lpfc_set_disctmo(vport);
1219  phba->fc_ratov = tmo;
1220 
1221  phba->fc_stat.elsXmitFLOGI++;
1222  elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1223 
1225  "Issue FLOGI: opt:x%x",
1226  phba->sli3_options, 0, 0);
1227 
1228  rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1229  if (rc == IOCB_ERROR) {
1230  lpfc_els_free_iocb(phba, elsiocb);
1231  return 1;
1232  }
1233  return 0;
1234 }
1235 
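/*
 * Walk the ELS ring txcmplq and abort any outstanding ELS request
 * iocbs addressed to the fabric controller (Fabric_DID).
 */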
1250 int
1251 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1252 {
1253  struct lpfc_sli_ring *pring;
1254  struct lpfc_iocbq *iocb, *next_iocb;
1255  struct lpfc_nodelist *ndlp;
1256  IOCB_t *icmd;
1257 
1258  /* Abort outstanding I/O on NPort <nlp_DID> */
1260  "0201 Abort outstanding I/O on NPort x%x\n",
1261  Fabric_DID);
1262 
1263  pring = &phba->sli.ring[LPFC_ELS_RING];
1264 
1265  /*
1266  * Check the txcmplq for an iocb that matches the nport the driver is
1267  * searching for.
1268  */
1269  spin_lock_irq(&phba->hbalock);
1270  list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1271  icmd = &iocb->iocb;
1272  if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1273  ndlp = (struct lpfc_nodelist *)(iocb->context1);
1274  if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1275  (ndlp->nlp_DID == Fabric_DID))
1276  lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1277  }
1278  }
1279  spin_unlock_irq(&phba->hbalock);
1280 
1281  return 0;
1282 }
1283 
1300 int
1301 lpfc_initial_flogi(struct lpfc_vport *vport)
1302 {
1303  struct lpfc_hba *phba = vport->phba;
1304  struct lpfc_nodelist *ndlp;
1305 
1306  vport->port_state = LPFC_FLOGI;
1307  lpfc_set_disctmo(vport);
1308 
1309  /* First look for the Fabric ndlp */
1310  ndlp = lpfc_findnode_did(vport, Fabric_DID);
1311  if (!ndlp) {
1312  /* Cannot find existing Fabric ndlp, so allocate a new one */
1313  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1314  if (!ndlp)
1315  return 0;
1316  lpfc_nlp_init(vport, ndlp, Fabric_DID);
1317  /* Set the node type */
1318  ndlp->nlp_type |= NLP_FABRIC;
1319  /* Put ndlp onto node list */
1320  lpfc_enqueue_node(vport, ndlp);
1321  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1322  /* re-setup ndlp without removing from node list */
1323  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1324  if (!ndlp)
1325  return 0;
1326  }
1327 
1328  if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1329  /* This decrement of reference count to node shall kick off
1330  * the release of the node.
1331  */
1332  lpfc_nlp_put(ndlp);
1333  return 0;
1334  }
1335  return 1;
1336 }
1337 
1354 int
1355 lpfc_initial_fdisc(struct lpfc_vport *vport)
1356 {
1357  struct lpfc_hba *phba = vport->phba;
1358  struct lpfc_nodelist *ndlp;
1359 
1360  /* First look for the Fabric ndlp */
1361  ndlp = lpfc_findnode_did(vport, Fabric_DID);
1362  if (!ndlp) {
1363  /* Cannot find existing Fabric ndlp, so allocate a new one */
1364  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1365  if (!ndlp)
1366  return 0;
1367  lpfc_nlp_init(vport, ndlp, Fabric_DID);
1368  /* Put ndlp onto node list */
1369  lpfc_enqueue_node(vport, ndlp);
1370  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1371  /* re-setup ndlp without removing from node list */
1372  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1373  if (!ndlp)
1374  return 0;
1375  }
1376 
1377  if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1378  /* decrement node reference count to trigger the release of
1379  * the node.
1380  */
1381  lpfc_nlp_put(ndlp);
1382  return 0;
1383  }
1384  return 1;
1385 }
1386 
1398 void
1399 lpfc_more_plogi(struct lpfc_vport *vport)
1400 {
1401  int sentplogi;
1402 
1403  if (vport->num_disc_nodes)
1404  vport->num_disc_nodes--;
1405 
1406  /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1408  "0232 Continue discovery with %d PLOGIs to go "
1409  "Data: x%x x%x x%x\n",
1410  vport->num_disc_nodes, vport->fc_plogi_cnt,
1411  vport->fc_flag, vport->port_state);
1412  /* Check to see if there are more PLOGIs to be sent */
1413  if (vport->fc_flag & FC_NLP_MORE)
1414  /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1415  sentplogi = lpfc_els_disc_plogi(vport);
1416 
1417  return;
1418 }
1419 
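/*
 * Confirm the ndlp for a completed PLOGI. If the WWPN in the PLOGI
 * response does not match the ndlp we logged into, locate or allocate
 * the ndlp that does match, swap DIDs, RRQ bitmaps, rport and state
 * between the two nodes, and return the node to use from now on.
 */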
1451 static struct lpfc_nodelist *
1452 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1453  struct lpfc_nodelist *ndlp)
1454 {
1455  struct lpfc_vport *vport = ndlp->vport;
1456  struct lpfc_nodelist *new_ndlp;
1457  struct lpfc_rport_data *rdata;
1458  struct fc_rport *rport;
1459  struct serv_parm *sp;
1460  uint8_t name[sizeof(struct lpfc_name)];
1461  uint32_t rc, keepDID = 0;
1462  int put_node;
1463  int put_rport;
1464  struct lpfc_node_rrqs rrq;
1465 
1466  /* Fabric nodes can have the same WWPN so we don't bother searching
1467  * by WWPN. Just return the ndlp that was given to us.
1468  */
1469  if (ndlp->nlp_type & NLP_FABRIC)
1470  return ndlp;
1471 
1472  sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1473  memset(name, 0, sizeof(struct lpfc_name));
1474 
1475  /* Now we find out if the NPort we are logging into, matches the WWPN
1476  * we have for that ndlp. If not, we have some work to do.
1477  */
1478  new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1479 
1480  if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1481  return ndlp;
1482  memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
1483 
1485  "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1486  ndlp, ndlp->nlp_DID, new_ndlp);
1487 
1488  if (!new_ndlp) {
1489  rc = memcmp(&ndlp->nlp_portname, name,
1490  sizeof(struct lpfc_name));
1491  if (!rc)
1492  return ndlp;
1493  new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1494  if (!new_ndlp)
1495  return ndlp;
1496  lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1497  } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1498  rc = memcmp(&ndlp->nlp_portname, name,
1499  sizeof(struct lpfc_name));
1500  if (!rc)
1501  return ndlp;
1502  new_ndlp = lpfc_enable_node(vport, new_ndlp,
1503  NLP_STE_UNUSED_NODE);
1504  if (!new_ndlp)
1505  return ndlp;
1506  keepDID = new_ndlp->nlp_DID;
1507  if (phba->sli_rev == LPFC_SLI_REV4)
1508  memcpy(&rrq.xri_bitmap,
1509  &new_ndlp->active_rrqs.xri_bitmap,
1510  sizeof(new_ndlp->active_rrqs.xri_bitmap));
1511  } else {
1512  keepDID = new_ndlp->nlp_DID;
1513  if (phba->sli_rev == LPFC_SLI_REV4)
1514  memcpy(&rrq.xri_bitmap,
1515  &new_ndlp->active_rrqs.xri_bitmap,
1516  sizeof(new_ndlp->active_rrqs.xri_bitmap));
1517  }
1518 
1519  lpfc_unreg_rpi(vport, new_ndlp);
1520  new_ndlp->nlp_DID = ndlp->nlp_DID;
1521  new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1522  if (phba->sli_rev == LPFC_SLI_REV4)
1523  memcpy(new_ndlp->active_rrqs.xri_bitmap,
1524  &ndlp->active_rrqs.xri_bitmap,
1525  sizeof(ndlp->active_rrqs.xri_bitmap));
1526 
1527  if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1528  new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1529  ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1530 
1531  /* Set state will put new_ndlp on to node list if not already done */
1532  lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1533 
1534  /* Move this back to NPR state */
1535  if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1536  /* The new_ndlp is replacing ndlp totally, so we need
1537  * to put ndlp on UNUSED list and try to free it.
1538  */
1540  "3179 PLOGI confirm NEW: %x %x\n",
1541  new_ndlp->nlp_DID, keepDID);
1542 
1543  /* Fix up the rport accordingly */
1544  rport = ndlp->rport;
1545  if (rport) {
1546  rdata = rport->dd_data;
1547  if (rdata->pnode == ndlp) {
1548  lpfc_nlp_put(ndlp);
1549  ndlp->rport = NULL;
1550  rdata->pnode = lpfc_nlp_get(new_ndlp);
1551  new_ndlp->rport = rport;
1552  }
1553  new_ndlp->nlp_type = ndlp->nlp_type;
1554  }
1555  /* We shall actually free the ndlp when both its nlp_DID and
1556  * nlp_portname fields equal 0, so that no unusable ndlp is
1557  * left on the nodelist.
1558  */
1559  if (ndlp->nlp_DID == 0) {
1560  spin_lock_irq(&phba->ndlp_lock);
1561  NLP_SET_FREE_REQ(ndlp);
1562  spin_unlock_irq(&phba->ndlp_lock);
1563  }
1564 
1565  /* Two ndlps cannot have the same did on the nodelist */
1566  ndlp->nlp_DID = keepDID;
1567  if (phba->sli_rev == LPFC_SLI_REV4)
1568  memcpy(&ndlp->active_rrqs.xri_bitmap,
1569  &rrq.xri_bitmap,
1570  sizeof(ndlp->active_rrqs.xri_bitmap));
1571  lpfc_drop_node(vport, ndlp);
1572  }
1573  else {
1575  "3180 PLOGI confirm SWAP: %x %x\n",
1576  new_ndlp->nlp_DID, keepDID);
1577 
1578  lpfc_unreg_rpi(vport, ndlp);
1579 
1580  /* Two ndlps cannot have the same did */
1581  ndlp->nlp_DID = keepDID;
1582  if (phba->sli_rev == LPFC_SLI_REV4)
1583  memcpy(&ndlp->active_rrqs.xri_bitmap,
1584  &rrq.xri_bitmap,
1585  sizeof(ndlp->active_rrqs.xri_bitmap));
1586 
1587  /* Since we are swapping the ndlp passed in with the new one
1588  * and the DID has already been swapped, copy over the state.
1589  * The new WWNs are already in new_ndlp since that is what
1590  * we looked it up by at the beginning of this routine.
1591  */
1592  new_ndlp->nlp_state = ndlp->nlp_state;
1593 
1594  /* Since we are switching over to the new_ndlp, the old
1595  * ndlp should be put in the NPR state, unless we have
1596  * already started re-discovery on it.
1597  */
1598  if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1599  (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1600  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1601 
1602  /* Fix up the rport accordingly */
1603  rport = ndlp->rport;
1604  if (rport) {
1605  rdata = rport->dd_data;
1606  put_node = rdata->pnode != NULL;
1607  put_rport = ndlp->rport != NULL;
1608  rdata->pnode = NULL;
1609  ndlp->rport = NULL;
1610  if (put_node)
1611  lpfc_nlp_put(ndlp);
1612  if (put_rport)
1613  put_device(&rport->dev);
1614  }
1615  }
1616  return new_ndlp;
1617 }
1618 
1630 void
1631 lpfc_end_rscn(struct lpfc_vport *vport)
1632 {
1633  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1634 
1635  if (vport->fc_flag & FC_RSCN_MODE) {
1636  /*
1637  * Check to see if more RSCNs came in while we were
1638  * processing this one.
1639  */
1640  if (vport->fc_rscn_id_cnt ||
1641  (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1642  lpfc_els_handle_rscn(vport);
1643  else {
1644  spin_lock_irq(shost->host_lock);
1645  vport->fc_flag &= ~FC_RSCN_MODE;
1646  spin_unlock_irq(shost->host_lock);
1647  }
1648  }
1649 }
1650 
1663 static void
1664 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1665  struct lpfc_iocbq *rspiocb)
1666 {
1667  struct lpfc_vport *vport = cmdiocb->vport;
1668  IOCB_t *irsp;
1669  struct lpfc_nodelist *ndlp;
1670  struct lpfc_node_rrq *rrq;
1671 
1672  /* we pass cmdiocb to state machine which needs rspiocb as well */
1673  rrq = cmdiocb->context_un.rrq;
1674  cmdiocb->context_un.rsp_iocb = rspiocb;
1675 
1676  irsp = &rspiocb->iocb;
1678  "RRQ cmpl: status:x%x/x%x did:x%x",
1679  irsp->ulpStatus, irsp->un.ulpWord[4],
1680  irsp->un.elsreq64.remoteID);
1681 
1682  ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1683  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1685  "2882 RRQ completes to NPort x%x "
1686  "with no ndlp. Data: x%x x%x x%x\n",
1687  irsp->un.elsreq64.remoteID,
1688  irsp->ulpStatus, irsp->un.ulpWord[4],
1689  irsp->ulpIoTag);
1690  goto out;
1691  }
1692 
1693  /* rrq completes to NPort <nlp_DID> */
1695  "2880 RRQ completes to NPort x%x "
1696  "Data: x%x x%x x%x x%x x%x\n",
1697  ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1698  irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1699 
1700  if (irsp->ulpStatus) {
1701  /* Check for retry */
1702  /* RRQ failed Don't print the vport to vport rjts */
1703  if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1704  (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1705  ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1706  (phba)->pport->cfg_log_verbose & LOG_ELS)
1707  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1708  "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1709  ndlp->nlp_DID, irsp->ulpStatus,
1710  irsp->un.ulpWord[4]);
1711  }
1712 out:
1713  if (rrq)
1714  lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1715  lpfc_els_free_iocb(phba, cmdiocb);
1716  return;
1717 }
1738 static void
1739 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1740  struct lpfc_iocbq *rspiocb)
1741 {
1742  struct lpfc_vport *vport = cmdiocb->vport;
1743  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1744  IOCB_t *irsp;
1745  struct lpfc_nodelist *ndlp;
1746  struct lpfc_dmabuf *prsp;
1747  int disc, rc, did, type;
1748 
1749  /* we pass cmdiocb to state machine which needs rspiocb as well */
1750  cmdiocb->context_un.rsp_iocb = rspiocb;
1751 
1752  irsp = &rspiocb->iocb;
1754  "PLOGI cmpl: status:x%x/x%x did:x%x",
1755  irsp->ulpStatus, irsp->un.ulpWord[4],
1756  irsp->un.elsreq64.remoteID);
1757 
1758  ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1759  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1761  "0136 PLOGI completes to NPort x%x "
1762  "with no ndlp. Data: x%x x%x x%x\n",
1763  irsp->un.elsreq64.remoteID,
1764  irsp->ulpStatus, irsp->un.ulpWord[4],
1765  irsp->ulpIoTag);
1766  goto out;
1767  }
1768 
1769  /* Since ndlp can be freed in the disc state machine, note if this node
1770  * is being used during discovery.
1771  */
1772  spin_lock_irq(shost->host_lock);
1773  disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1774  ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1775  spin_unlock_irq(shost->host_lock);
1776  rc = 0;
1777 
1778  /* PLOGI completes to NPort <nlp_DID> */
1780  "0102 PLOGI completes to NPort x%x "
1781  "Data: x%x x%x x%x x%x x%x\n",
1782  ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1783  irsp->ulpTimeout, disc, vport->num_disc_nodes);
1784  /* Check to see if link went down during discovery */
1785  if (lpfc_els_chk_latt(vport)) {
1786  spin_lock_irq(shost->host_lock);
1787  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1788  spin_unlock_irq(shost->host_lock);
1789  goto out;
1790  }
1791 
1792  /* ndlp could be freed in DSM, save these values now */
1793  type = ndlp->nlp_type;
1794  did = ndlp->nlp_DID;
1795 
1796  if (irsp->ulpStatus) {
1797  /* Check for retry */
1798  if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1799  /* ELS command is being retried */
1800  if (disc) {
1801  spin_lock_irq(shost->host_lock);
1802  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1803  spin_unlock_irq(shost->host_lock);
1804  }
1805  goto out;
1806  }
1807  /* PLOGI failed Don't print the vport to vport rjts */
1808  if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1809  (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1810  ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1811  (phba)->pport->cfg_log_verbose & LOG_ELS)
1812  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1813  "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1814  ndlp->nlp_DID, irsp->ulpStatus,
1815  irsp->un.ulpWord[4]);
1816  /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1817  if (lpfc_error_lost_link(irsp))
1818  rc = NLP_STE_FREED_NODE;
1819  else
1820  rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1821  NLP_EVT_CMPL_PLOGI);
1822  } else {
1823  /* Good status, call state machine */
1824  prsp = list_entry(((struct lpfc_dmabuf *)
1825  cmdiocb->context2)->list.next,
1826  struct lpfc_dmabuf, list);
1827  ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1828  rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1829  NLP_EVT_CMPL_PLOGI);
1830  }
1831 
1832  if (disc && vport->num_disc_nodes) {
1833  /* Check to see if there are more PLOGIs to be sent */
1834  lpfc_more_plogi(vport);
1835 
1836  if (vport->num_disc_nodes == 0) {
1837  spin_lock_irq(shost->host_lock);
1838  vport->fc_flag &= ~FC_NDISC_ACTIVE;
1839  spin_unlock_irq(shost->host_lock);
1840 
1841  lpfc_can_disctmo(vport);
1842  lpfc_end_rscn(vport);
1843  }
1844  }
1845 
1846 out:
1847  lpfc_els_free_iocb(phba, cmdiocb);
1848  return;
1849 }
1850 
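/*
 * Build and transmit a PLOGI ELS request to the given DID using the
 * vport service parameters, registering lpfc_cmpl_els_plogi as the
 * completion handler. Returns 0 on success, 1 on failure.
 */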
1872 int
1873 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1874 {
1875  struct lpfc_hba *phba = vport->phba;
1876  struct serv_parm *sp;
1877  IOCB_t *icmd;
1878  struct lpfc_nodelist *ndlp;
1879  struct lpfc_iocbq *elsiocb;
1880  struct lpfc_sli *psli;
1881  uint8_t *pcmd;
1882  uint16_t cmdsize;
1883  int ret;
1884 
1885  psli = &phba->sli;
1886 
1887  ndlp = lpfc_findnode_did(vport, did);
1888  if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1889  ndlp = NULL;
1890 
1891  /* If ndlp is not NULL, we will bump the reference count on it */
1892  cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1893  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1894  ELS_CMD_PLOGI);
1895  if (!elsiocb)
1896  return 1;
1897 
1898  icmd = &elsiocb->iocb;
1899  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1900 
1901  /* For PLOGI request, remainder of payload is service parameters */
1902  *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1903  pcmd += sizeof(uint32_t);
1904  memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1905  sp = (struct serv_parm *) pcmd;
1906 
1907  /*
1908  * If we are an N_Port connected to a Fabric, fix up the parameters
1909  * so logins to devices on remote loops work.
1910  */
1911  if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1912  sp->cmn.altBbCredit = 1;
1913 
1914  if (sp->cmn.fcphLow < FC_PH_4_3)
1915  sp->cmn.fcphLow = FC_PH_4_3;
1916 
1917  if (sp->cmn.fcphHigh < FC_PH3)
1918  sp->cmn.fcphHigh = FC_PH3;
1919 
1921  "Issue PLOGI: did:x%x",
1922  did, 0, 0);
1923 
1924  phba->fc_stat.elsXmitPLOGI++;
1925  elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1926  ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1927 
1928  if (ret == IOCB_ERROR) {
1929  lpfc_els_free_iocb(phba, elsiocb);
1930  return 1;
1931  }
1932  return 0;
1933 }
1934 
1948 static void
1949 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1950  struct lpfc_iocbq *rspiocb)
1951 {
1952  struct lpfc_vport *vport = cmdiocb->vport;
1953  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1954  IOCB_t *irsp;
1955  struct lpfc_sli *psli;
1956  struct lpfc_nodelist *ndlp;
1957 
1958  psli = &phba->sli;
1959  /* we pass cmdiocb to state machine which needs rspiocb as well */
1960  cmdiocb->context_un.rsp_iocb = rspiocb;
1961 
1962  irsp = &(rspiocb->iocb);
1963  ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1964  spin_lock_irq(shost->host_lock);
1965  ndlp->nlp_flag &= ~NLP_PRLI_SND;
1966  spin_unlock_irq(shost->host_lock);
1967 
1969  "PRLI cmpl: status:x%x/x%x did:x%x",
1970  irsp->ulpStatus, irsp->un.ulpWord[4],
1971  ndlp->nlp_DID);
1972  /* PRLI completes to NPort <nlp_DID> */
1973  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1974  "0103 PRLI completes to NPort x%x "
1975  "Data: x%x x%x x%x x%x\n",
1976  ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1977  irsp->ulpTimeout, vport->num_disc_nodes);
1978 
1979  vport->fc_prli_sent--;
1980  /* Check to see if link went down during discovery */
1981  if (lpfc_els_chk_latt(vport))
1982  goto out;
1983 
1984  if (irsp->ulpStatus) {
1985  /* Check for retry */
1986  if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1987  /* ELS command is being retried */
1988  goto out;
1989  }
1990  /* PRLI failed */
1991  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1992  "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1993  ndlp->nlp_DID, irsp->ulpStatus,
1994  irsp->un.ulpWord[4]);
1995  /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1996  if (lpfc_error_lost_link(irsp))
1997  goto out;
1998  else
1999  lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2000  NLP_EVT_CMPL_PRLI);
2001  } else
2002  /* Good status, call state machine */
2003  lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2004  NLP_EVT_CMPL_PRLI);
2005 out:
2006  lpfc_els_free_iocb(phba, cmdiocb);
2007  return;
2008 }
2009 
2031 int
2032 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2033  uint8_t retry)
2034 {
2035  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2036  struct lpfc_hba *phba = vport->phba;
2037  PRLI *npr;
2038  IOCB_t *icmd;
2039  struct lpfc_iocbq *elsiocb;
2040  uint8_t *pcmd;
2041  uint16_t cmdsize;
2042 
2043  cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2044  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2045  ndlp->nlp_DID, ELS_CMD_PRLI);
2046  if (!elsiocb)
2047  return 1;
2048 
2049  icmd = &elsiocb->iocb;
2050  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2051 
2052  /* For PRLI request, remainder of payload is service parameters */
2053  memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
2054  *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
2055  pcmd += sizeof(uint32_t);
2056 
2057  /* For PRLI, remainder of payload is PRLI parameter page */
2058  npr = (PRLI *) pcmd;
2059  /*
2060  * If our firmware version is 3.20 or later,
2061  * set the following bits for FC-TAPE support.
2062  */
2063  if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2064  npr->ConfmComplAllowed = 1;
2065  npr->Retry = 1;
2066  npr->TaskRetryIdReq = 1;
2067  }
2068  npr->estabImagePair = 1;
2069  npr->readXferRdyDis = 1;
2070 
2071  /* For FCP support */
2072  npr->prliType = PRLI_FCP_TYPE;
2073  npr->initiatorFunc = 1;
2074 
2076  "Issue PRLI: did:x%x",
2077  ndlp->nlp_DID, 0, 0);
2078 
2079  phba->fc_stat.elsXmitPRLI++;
2080  elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2081  spin_lock_irq(shost->host_lock);
2082  ndlp->nlp_flag |= NLP_PRLI_SND;
2083  spin_unlock_irq(shost->host_lock);
2084  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2085  IOCB_ERROR) {
2086  spin_lock_irq(shost->host_lock);
2087  ndlp->nlp_flag &= ~NLP_PRLI_SND;
2088  spin_unlock_irq(shost->host_lock);
2089  lpfc_els_free_iocb(phba, elsiocb);
2090  return 1;
2091  }
2092  vport->fc_prli_sent++;
2093  return 0;
2094 }
2095 
2108 static void
2109 lpfc_rscn_disc(struct lpfc_vport *vport)
2110 {
2111  lpfc_can_disctmo(vport);
2112 
2113  /* RSCN discovery */
2114  /* go thru NPR nodes and issue ELS PLOGIs */
2115  if (vport->fc_npr_cnt)
2116  if (lpfc_els_disc_plogi(vport))
2117  return;
2118 
2119  lpfc_end_rscn(vport);
2120 }
2121 
2132 static void
2133 lpfc_adisc_done(struct lpfc_vport *vport)
2134 {
2135  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2136  struct lpfc_hba *phba = vport->phba;
2137 
2138  /*
2139  * For NPIV, cmpl_reg_vpi will set port_state to READY,
2140  * and continue discovery.
2141  */
2142  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2143  !(vport->fc_flag & FC_RSCN_MODE) &&
2144  (phba->sli_rev < LPFC_SLI_REV4)) {
2145  lpfc_issue_reg_vpi(phba, vport);
2146  return;
2147  }
2148  /*
2149  * For SLI2, we need to set port_state to READY
2150  * and continue discovery.
2151  */
2152  if (vport->port_state < LPFC_VPORT_READY) {
2153  /* If we get here, there is nothing to ADISC */
2154  if (vport->port_type == LPFC_PHYSICAL_PORT)
2155  lpfc_issue_clear_la(phba, vport);
2156  if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2157  vport->num_disc_nodes = 0;
2158  /* go thru NPR list, issue ELS PLOGIs */
2159  if (vport->fc_npr_cnt)
2160  lpfc_els_disc_plogi(vport);
2161  if (!vport->num_disc_nodes) {
2162  spin_lock_irq(shost->host_lock);
2163  vport->fc_flag &= ~FC_NDISC_ACTIVE;
2164  spin_unlock_irq(shost->host_lock);
2165  lpfc_can_disctmo(vport);
2166  lpfc_end_rscn(vport);
2167  }
2168  }
2169  vport->port_state = LPFC_VPORT_READY;
2170  } else
2171  lpfc_rscn_disc(vport);
2172 }
2173 
2183 void
2184 lpfc_more_adisc(struct lpfc_vport *vport)
2185 {
2186  int sentadisc;
2187 
2188  if (vport->num_disc_nodes)
2189  vport->num_disc_nodes--;
2190  /* Continue discovery with <num_disc_nodes> ADISCs to go */
2192  "0210 Continue discovery with %d ADISCs to go "
2193  "Data: x%x x%x x%x\n",
2194  vport->num_disc_nodes, vport->fc_adisc_cnt,
2195  vport->fc_flag, vport->port_state);
2196  /* Check to see if there are more ADISCs to be sent */
2197  if (vport->fc_flag & FC_NLP_MORE) {
2198  lpfc_set_disctmo(vport);
2199  /* go thru NPR nodes and issue any remaining ELS ADISCs */
2200  sentadisc = lpfc_els_disc_adisc(vport);
2201  }
2202  if (!vport->num_disc_nodes)
2203  lpfc_adisc_done(vport);
2204  return;
2205 }
2206 
2223 static void
2224 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2225  struct lpfc_iocbq *rspiocb)
2226 {
2227  struct lpfc_vport *vport = cmdiocb->vport;
2228  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2229  IOCB_t *irsp;
2230  struct lpfc_nodelist *ndlp;
2231  int disc;
2232 
2233  /* we pass cmdiocb to state machine which needs rspiocb as well */
2234  cmdiocb->context_un.rsp_iocb = rspiocb;
2235 
2236  irsp = &(rspiocb->iocb);
2237  ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2238 
2240  "ADISC cmpl: status:x%x/x%x did:x%x",
2241  irsp->ulpStatus, irsp->un.ulpWord[4],
2242  ndlp->nlp_DID);
2243 
2244  /* Since ndlp can be freed in the disc state machine, note if this node
2245  * is being used during discovery.
2246  */
2247  spin_lock_irq(shost->host_lock);
2248  disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2249  ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2250  spin_unlock_irq(shost->host_lock);
2251  /* ADISC completes to NPort <nlp_DID> */
2252  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2253  "0104 ADISC completes to NPort x%x "
2254  "Data: x%x x%x x%x x%x x%x\n",
2255  ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2256  irsp->ulpTimeout, disc, vport->num_disc_nodes);
2257  /* Check to see if link went down during discovery */
2258  if (lpfc_els_chk_latt(vport)) {
2259  spin_lock_irq(shost->host_lock);
2260  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2261  spin_unlock_irq(shost->host_lock);
2262  goto out;
2263  }
2264 
2265  if (irsp->ulpStatus) {
2266  /* Check for retry */
2267  if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2268  /* ELS command is being retried */
2269  if (disc) {
2270  spin_lock_irq(shost->host_lock);
2271  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2272  spin_unlock_irq(shost->host_lock);
2273  lpfc_set_disctmo(vport);
2274  }
2275  goto out;
2276  }
2277  /* ADISC failed */
2278  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2279  "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2280  ndlp->nlp_DID, irsp->ulpStatus,
2281  irsp->un.ulpWord[4]);
2282  /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2283  if (!lpfc_error_lost_link(irsp))
2284  lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2285  NLP_EVT_CMPL_ADISC);
2286  } else
2287  /* Good status, call state machine */
2288  lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2289  NLP_EVT_CMPL_ADISC);
2290 
2291  /* Check to see if there are more ADISCs to be sent */
2292  if (disc && vport->num_disc_nodes)
2293  lpfc_more_adisc(vport);
2294 out:
2295  lpfc_els_free_iocb(phba, cmdiocb);
2296  return;
2297 }
2298 
2319 int
2320 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2321  uint8_t retry)
2322 {
2323  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2324  struct lpfc_hba *phba = vport->phba;
2325  ADISC *ap;
2326  IOCB_t *icmd;
2327  struct lpfc_iocbq *elsiocb;
2328  uint8_t *pcmd;
2329  uint16_t cmdsize;
2330 
2331  cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2332  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2333  ndlp->nlp_DID, ELS_CMD_ADISC);
2334  if (!elsiocb)
2335  return 1;
2336 
2337  icmd = &elsiocb->iocb;
2338  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2339 
2340  /* For ADISC request, remainder of payload is service parameters */
2341  *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2342  pcmd += sizeof(uint32_t);
2343 
2344  /* Fill in ADISC payload */
2345  ap = (ADISC *) pcmd;
2346  ap->hardAL_PA = phba->fc_pref_ALPA;
2347  memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2348  memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2349  ap->DID = be32_to_cpu(vport->fc_myDID);
2350 
2351  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2352  "Issue ADISC: did:x%x",
2353  ndlp->nlp_DID, 0, 0);
2354 
2355  phba->fc_stat.elsXmitADISC++;
2356  elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2357  spin_lock_irq(shost->host_lock);
2358  ndlp->nlp_flag |= NLP_ADISC_SND;
2359  spin_unlock_irq(shost->host_lock);
2360  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2361  IOCB_ERROR) {
2362  spin_lock_irq(shost->host_lock);
2363  ndlp->nlp_flag &= ~NLP_ADISC_SND;
2364  spin_unlock_irq(shost->host_lock);
2365  lpfc_els_free_iocb(phba, elsiocb);
2366  return 1;
2367  }
2368  return 0;
2369 }
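/*
 * Wire layout of the ADISC request built above (one ELS command word
 * followed by the ADISC parameter page, per FC-LS):
 *
 *	word 0		ELS_CMD_ADISC
 *	word 1		hard AL_PA hint (phba->fc_pref_ALPA)
 *	words 2-3	originator Port_Name
 *	words 4-5	originator Node_Name
 *	word 6		originator N_Port ID (vport->fc_myDID)
 */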
2370 
2383 static void
2384 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2385  struct lpfc_iocbq *rspiocb)
2386 {
2387  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2388  struct lpfc_vport *vport = ndlp->vport;
2389  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2390  IOCB_t *irsp;
2391  struct lpfc_sli *psli;
2392  struct lpfcMboxq *mbox;
2393  unsigned long flags;
2394  uint32_t skip_recovery = 0;
2395 
2396  psli = &phba->sli;
2397  /* we pass cmdiocb to state machine which needs rspiocb as well */
2398  cmdiocb->context_un.rsp_iocb = rspiocb;
2399 
2400  irsp = &(rspiocb->iocb);
2401  spin_lock_irq(shost->host_lock);
2402  ndlp->nlp_flag &= ~NLP_LOGO_SND;
2403  spin_unlock_irq(shost->host_lock);
2404 
2405  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2406  "LOGO cmpl: status:x%x/x%x did:x%x",
2407  irsp->ulpStatus, irsp->un.ulpWord[4],
2408  ndlp->nlp_DID);
2409 
2410  /* LOGO completes to NPort <nlp_DID> */
2411  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2412  "0105 LOGO completes to NPort x%x "
2413  "Data: x%x x%x x%x x%x\n",
2414  ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2415  irsp->ulpTimeout, vport->num_disc_nodes);
2416 
2417  if (lpfc_els_chk_latt(vport)) {
2418  skip_recovery = 1;
2419  goto out;
2420  }
2421 
2422  /* Check to see if link went down during discovery */
2423  if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2424  /* NLP_EVT_DEVICE_RM should unregister the RPI
2425  * which should abort all outstanding IOs.
2426  */
2427  lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2428  NLP_EVT_DEVICE_RM);
2429  skip_recovery = 1;
2430  goto out;
2431  }
2432 
2433  if (irsp->ulpStatus) {
2434  /* Check for retry */
2435  if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2436  /* ELS command is being retried */
2437  skip_recovery = 1;
2438  goto out;
2439  }
2440  /* LOGO failed */
2441  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2442  "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2443  ndlp->nlp_DID, irsp->ulpStatus,
2444  irsp->un.ulpWord[4]);
2445  /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2446  if (lpfc_error_lost_link(irsp)) {
2447  skip_recovery = 1;
2448  goto out;
2449  }
2450  }
2451 
2452  /* Call state machine. This will unregister the rpi if needed. */
2453  lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2454 
2455 out:
2456  lpfc_els_free_iocb(phba, cmdiocb);
2457  /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2458  if ((vport->fc_flag & FC_PT2PT) &&
2459  !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2460  phba->pport->fc_myDID = 0;
2461  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2462  if (mbox) {
2463  lpfc_config_link(phba, mbox);
2464  mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2465  mbox->vport = vport;
2466  if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2467  MBX_NOT_FINISHED) {
2468  mempool_free(mbox, phba->mbox_mem_pool);
2469  skip_recovery = 1;
2470  }
2471  }
2472  }
2473 
2474  /*
2475  * If the node is a target, the handling attempts to recover the port.
2476  * For any other port type, the rpi is unregistered as an implicit
2477  * LOGO.
2478  */
2479  if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
2480  lpfc_cancel_retry_delay_tmo(vport, ndlp);
2481  spin_lock_irqsave(shost->host_lock, flags);
2482  ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2483  spin_unlock_irqrestore(shost->host_lock, flags);
2484 
2485  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2486  "3187 LOGO completes to NPort x%x: Start "
2487  "Recovery Data: x%x x%x x%x x%x\n",
2488  ndlp->nlp_DID, irsp->ulpStatus,
2489  irsp->un.ulpWord[4], irsp->ulpTimeout,
2490  vport->num_disc_nodes);
2491  lpfc_disc_start(vport);
2492  }
2493  return;
2494 }
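/*
 * Note on the pt2pt branch above: in point-to-point mode the local N_Port ID
 * can be re-assigned by the peer's next PLOGI, so once a LOGO completes and
 * no PLOGI has been received yet (FC_PT2PT_PLOGI clear), the driver zeroes
 * fc_myDID and re-issues CONFIG_LINK so the port is ready to accept a new
 * S_ID; if that mailbox cannot be posted, target recovery is skipped as well.
 */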
2495 
2516 int
2517 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2518  uint8_t retry)
2519 {
2520  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2521  struct lpfc_hba *phba = vport->phba;
2522  IOCB_t *icmd;
2523  struct lpfc_iocbq *elsiocb;
2524  uint8_t *pcmd;
2525  uint16_t cmdsize;
2526  int rc;
2527 
2528  spin_lock_irq(shost->host_lock);
2529  if (ndlp->nlp_flag & NLP_LOGO_SND) {
2530  spin_unlock_irq(shost->host_lock);
2531  return 0;
2532  }
2533  spin_unlock_irq(shost->host_lock);
2534 
2535  cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2536  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2537  ndlp->nlp_DID, ELS_CMD_LOGO);
2538  if (!elsiocb)
2539  return 1;
2540 
2541  icmd = &elsiocb->iocb;
2542  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2543  *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2544  pcmd += sizeof(uint32_t);
2545 
2546  /* Fill in LOGO payload */
2547  *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2548  pcmd += sizeof(uint32_t);
2549  memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2550 
2551  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2552  "Issue LOGO: did:x%x",
2553  ndlp->nlp_DID, 0, 0);
2554 
2555  /*
2556  * If we are issuing a LOGO, we may try to recover the remote NPort
2557  * by issuing a PLOGI later. Even though we issue ELS cmds by the
2558  * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2559  * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
2560  * for that ELS cmd. To avoid this situation, let's get rid of the
2561  * RPI right now, before any ELS cmds are sent.
2562  */
2563  spin_lock_irq(shost->host_lock);
2564  ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2565  spin_unlock_irq(shost->host_lock);
2566  if (lpfc_unreg_rpi(vport, ndlp)) {
2567  lpfc_els_free_iocb(phba, elsiocb);
2568  return 0;
2569  }
2570 
2571  phba->fc_stat.elsXmitLOGO++;
2572  elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2573  spin_lock_irq(shost->host_lock);
2574  ndlp->nlp_flag |= NLP_LOGO_SND;
2575  ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2576  spin_unlock_irq(shost->host_lock);
2577  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2578 
2579  if (rc == IOCB_ERROR) {
2580  spin_lock_irq(shost->host_lock);
2581  ndlp->nlp_flag &= ~NLP_LOGO_SND;
2582  spin_unlock_irq(shost->host_lock);
2583  lpfc_els_free_iocb(phba, elsiocb);
2584  return 1;
2585  }
2586  return 0;
2587 }
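/*
 * Wire layout of the LOGO request built above (cmdsize = two words plus a
 * WWPN):
 *
 *	word 0		ELS_CMD_LOGO
 *	word 1		originator N_Port ID (vport->fc_myDID)
 *	words 2-3	originator Port_Name
 */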
2588 
2605 static void
2606 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2607  struct lpfc_iocbq *rspiocb)
2608 {
2609  struct lpfc_vport *vport = cmdiocb->vport;
2610  IOCB_t *irsp;
2611 
2612  irsp = &rspiocb->iocb;
2613 
2614  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2615  "ELS cmd cmpl: status:x%x/x%x did:x%x",
2616  irsp->ulpStatus, irsp->un.ulpWord[4],
2617  irsp->un.elsreq64.remoteID);
2618  /* ELS cmd tag <ulpIoTag> completes */
2619  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2620  "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2621  irsp->ulpIoTag, irsp->ulpStatus,
2622  irsp->un.ulpWord[4], irsp->ulpTimeout);
2623  /* Check to see if link went down during discovery */
2624  lpfc_els_chk_latt(vport);
2625  lpfc_els_free_iocb(phba, cmdiocb);
2626  return;
2627 }
2628 
2651 int
2652 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2653 {
2654  struct lpfc_hba *phba = vport->phba;
2655  IOCB_t *icmd;
2656  struct lpfc_iocbq *elsiocb;
2657  struct lpfc_sli *psli;
2658  uint8_t *pcmd;
2659  uint16_t cmdsize;
2660  struct lpfc_nodelist *ndlp;
2661 
2662  psli = &phba->sli;
2663  cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2664 
2665  ndlp = lpfc_findnode_did(vport, nportid);
2666  if (!ndlp) {
2667  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2668  if (!ndlp)
2669  return 1;
2670  lpfc_nlp_init(vport, ndlp, nportid);
2671  lpfc_enqueue_node(vport, ndlp);
2672  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2673  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2674  if (!ndlp)
2675  return 1;
2676  }
2677 
2678  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2679  ndlp->nlp_DID, ELS_CMD_SCR);
2680 
2681  if (!elsiocb) {
2682  /* This will trigger the release of the node just
2683  * allocated
2684  */
2685  lpfc_nlp_put(ndlp);
2686  return 1;
2687  }
2688 
2689  icmd = &elsiocb->iocb;
2690  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2691 
2692  *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2693  pcmd += sizeof(uint32_t);
2694 
2695  /* For SCR, remainder of payload is SCR parameter page */
2696  memset(pcmd, 0, sizeof(SCR));
2697  ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2698 
2699  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2700  "Issue SCR: did:x%x",
2701  ndlp->nlp_DID, 0, 0);
2702 
2703  phba->fc_stat.elsXmitSCR++;
2704  elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2705  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2706  IOCB_ERROR) {
2707  /* The additional lpfc_nlp_put will cause the following
2708  * lpfc_els_free_iocb routine to trigger the release of
2709  * the node.
2710  */
2711  lpfc_nlp_put(ndlp);
2712  lpfc_els_free_iocb(phba, elsiocb);
2713  return 1;
2714  }
2715  /* This will cause the callback-function lpfc_cmpl_els_cmd to
2716  * trigger the release of the node.
2717  */
2718  lpfc_nlp_put(ndlp);
2719  return 0;
2720 }
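/*
 * Reference-count pattern used by SCR above (and by FARPR below): the node
 * may exist only to carry this exchange, so every exit path drops one
 * reference with lpfc_nlp_put().  On the error path that put, combined with
 * lpfc_els_free_iocb(), releases the just-allocated node; on the success
 * path the completion handler's lpfc_els_free_iocb() drops the remaining
 * reference taken when the IOCB was prepared.
 */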
2721 
2744 static int
2745 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2746 {
2747  struct lpfc_hba *phba = vport->phba;
2748  IOCB_t *icmd;
2749  struct lpfc_iocbq *elsiocb;
2750  struct lpfc_sli *psli;
2751  FARP *fp;
2752  uint8_t *pcmd;
2753  uint32_t *lp;
2754  uint16_t cmdsize;
2755  struct lpfc_nodelist *ondlp;
2756  struct lpfc_nodelist *ndlp;
2757 
2758  psli = &phba->sli;
2759  cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2760 
2761  ndlp = lpfc_findnode_did(vport, nportid);
2762  if (!ndlp) {
2763  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2764  if (!ndlp)
2765  return 1;
2766  lpfc_nlp_init(vport, ndlp, nportid);
2767  lpfc_enqueue_node(vport, ndlp);
2768  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2769  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2770  if (!ndlp)
2771  return 1;
2772  }
2773 
2774  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2775  ndlp->nlp_DID, ELS_CMD_RNID);
2776  if (!elsiocb) {
2777  /* This will trigger the release of the node just
2778  * allocated
2779  */
2780  lpfc_nlp_put(ndlp);
2781  return 1;
2782  }
2783 
2784  icmd = &elsiocb->iocb;
2785  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2786 
2787  *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2788  pcmd += sizeof(uint32_t);
2789 
2790  /* Fill in FARPR payload */
2791  fp = (FARP *) (pcmd);
2792  memset(fp, 0, sizeof(FARP));
2793  lp = (uint32_t *) pcmd;
2794  *lp++ = be32_to_cpu(nportid);
2795  *lp++ = be32_to_cpu(vport->fc_myDID);
2796  fp->Rflags = 0;
2798 
2799  memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2800  memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2801  ondlp = lpfc_findnode_did(vport, nportid);
2802  if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2803  memcpy(&fp->OportName, &ondlp->nlp_portname,
2804  sizeof(struct lpfc_name));
2805  memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
2806  sizeof(struct lpfc_name));
2807  }
2808 
2809  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2810  "Issue FARPR: did:x%x",
2811  ndlp->nlp_DID, 0, 0);
2812 
2813  phba->fc_stat.elsXmitFARPR++;
2814  elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2815  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2816  IOCB_ERROR) {
2817  /* The additional lpfc_nlp_put will cause the following
2818  * lpfc_els_free_iocb routine to trigger the release of
2819  * the node.
2820  */
2821  lpfc_nlp_put(ndlp);
2822  lpfc_els_free_iocb(phba, elsiocb);
2823  return 1;
2824  }
2825  /* This will cause the callback-function lpfc_cmpl_els_cmd to
2826  * trigger the release of the node.
2827  */
2828  lpfc_nlp_put(ndlp);
2829  return 0;
2830 }
2831 
2844 void
2845 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
2846 {
2847  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2848  struct lpfc_work_evt *evtp;
2849 
2850  if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2851  return;
2852  spin_lock_irq(shost->host_lock);
2853  nlp->nlp_flag &= ~NLP_DELAY_TMO;
2854  spin_unlock_irq(shost->host_lock);
2855  del_timer_sync(&nlp->nlp_delayfunc);
2856  nlp->nlp_last_elscmd = 0;
2857  if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
2858  list_del_init(&nlp->els_retry_evt.evt_listp);
2859  /* Decrement nlp reference count held for the delayed retry */
2860  evtp = &nlp->els_retry_evt;
2861  lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2862  }
2863  if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2864  spin_lock_irq(shost->host_lock);
2865  nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2866  spin_unlock_irq(shost->host_lock);
2867  if (vport->num_disc_nodes) {
2868  if (vport->port_state < LPFC_VPORT_READY) {
2869  /* Check if there are more ADISCs to be sent */
2870  lpfc_more_adisc(vport);
2871  } else {
2872  /* Check if there are more PLOGIs to be sent */
2873  lpfc_more_plogi(vport);
2874  if (vport->num_disc_nodes == 0) {
2875  spin_lock_irq(shost->host_lock);
2876  vport->fc_flag &= ~FC_NDISC_ACTIVE;
2877  spin_unlock_irq(shost->host_lock);
2878  lpfc_can_disctmo(vport);
2879  lpfc_end_rscn(vport);
2880  }
2881  }
2882  }
2883  }
2884  return;
2885 }
2886 
2901 void
2902 lpfc_els_retry_delay(unsigned long ptr)
2903 {
2904  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2905  struct lpfc_vport *vport = ndlp->vport;
2906  struct lpfc_hba *phba = vport->phba;
2907  unsigned long flags;
2908  struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
2909 
2910  spin_lock_irqsave(&phba->hbalock, flags);
2911  if (!list_empty(&evtp->evt_listp)) {
2912  spin_unlock_irqrestore(&phba->hbalock, flags);
2913  return;
2914  }
2915 
2916  /* We need to hold the node by incrementing the reference
2917  * count until the queued work is done
2918  */
2919  evtp->evt_arg1 = lpfc_nlp_get(ndlp);
2920  if (evtp->evt_arg1) {
2921  evtp->evt = LPFC_EVT_ELS_RETRY;
2922  list_add_tail(&evtp->evt_listp, &phba->work_list);
2923  lpfc_worker_wake_up(phba);
2924  }
2925  spin_unlock_irqrestore(&phba->hbalock, flags);
2926  return;
2927 }
2928 
2938 void
2939 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2940 {
2941  struct lpfc_vport *vport = ndlp->vport;
2942  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2943  uint32_t cmd, did, retry;
2944 
2945  spin_lock_irq(shost->host_lock);
2946  did = ndlp->nlp_DID;
2947  cmd = ndlp->nlp_last_elscmd;
2948  ndlp->nlp_last_elscmd = 0;
2949 
2950  if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2951  spin_unlock_irq(shost->host_lock);
2952  return;
2953  }
2954 
2955  ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2956  spin_unlock_irq(shost->host_lock);
2957  /*
2958  * If a discovery event readded nlp_delayfunc after timer
2959  * firing and before processing the timer, cancel the
2960  * nlp_delayfunc.
2961  */
2962  del_timer_sync(&ndlp->nlp_delayfunc);
2963  retry = ndlp->nlp_retry;
2964  ndlp->nlp_retry = 0;
2965 
2966  switch (cmd) {
2967  case ELS_CMD_FLOGI:
2968  lpfc_issue_els_flogi(vport, ndlp, retry);
2969  break;
2970  case ELS_CMD_PLOGI:
2971  if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
2972  ndlp->nlp_prev_state = ndlp->nlp_state;
2973  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2974  }
2975  break;
2976  case ELS_CMD_ADISC:
2977  if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
2978  ndlp->nlp_prev_state = ndlp->nlp_state;
2979  lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2980  }
2981  break;
2982  case ELS_CMD_PRLI:
2983  if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
2984  ndlp->nlp_prev_state = ndlp->nlp_state;
2985  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2986  }
2987  break;
2988  case ELS_CMD_LOGO:
2989  if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
2990  ndlp->nlp_prev_state = ndlp->nlp_state;
2991  lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
2992  }
2993  break;
2994  case ELS_CMD_FDISC:
2995  if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
2996  lpfc_issue_els_fdisc(vport, ndlp, retry);
2997  break;
2998  }
2999  return;
3000 }
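/*
 * Delayed-retry plumbing tying the two routines above to lpfc_els_retry()
 * below: when a retry needs to be paced, lpfc_els_retry() records the
 * command in nlp_last_elscmd, sets NLP_DELAY_TMO and arms nlp_delayfunc.
 * The timer fires in lpfc_els_retry_delay(), which cannot issue ELS traffic
 * from timer context and instead queues an LPFC_EVT_ELS_RETRY work item;
 * the worker thread then calls lpfc_els_retry_delay_handler() to reissue
 * the saved command with the saved retry count.
 */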
3001 
3023 static int
3024 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3025  struct lpfc_iocbq *rspiocb)
3026 {
3027  struct lpfc_vport *vport = cmdiocb->vport;
3028  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3029  IOCB_t *irsp = &rspiocb->iocb;
3030  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3031  struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3032  uint32_t *elscmd;
3033  struct ls_rjt stat;
3034  int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3035  int logerr = 0;
3036  uint32_t cmd = 0;
3037  uint32_t did;
3038 
3039 
3040  /* Note: context2 may be 0 for an internal driver abort
3041  * of a delayed ELS command.
3042  */
3043 
3044  if (pcmd && pcmd->virt) {
3045  elscmd = (uint32_t *) (pcmd->virt);
3046  cmd = *elscmd++;
3047  }
3048 
3049  if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3050  did = ndlp->nlp_DID;
3051  else {
3052  /* We should only hit this case for retrying PLOGI */
3053  did = irsp->un.elsreq64.remoteID;
3054  ndlp = lpfc_findnode_did(vport, did);
3055  if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3056  && (cmd != ELS_CMD_PLOGI))
3057  return 1;
3058  }
3059 
3060  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3061  "Retry ELS: wd7:x%x wd4:x%x did:x%x",
3062  *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3063 
3064  switch (irsp->ulpStatus) {
3065  case IOSTAT_FCP_RSP_ERROR:
3066  break;
3067  case IOSTAT_REMOTE_STOP:
3068  if (phba->sli_rev == LPFC_SLI_REV4) {
3069  /* This IO was aborted by the target; we don't
3070  * know the rxid and, because we did not send the
3071  * ABTS, we cannot generate an RRQ.
3072  */
3073  lpfc_set_rrq_active(phba, ndlp,
3074  cmdiocb->sli4_lxritag, 0, 0);
3075  }
3076  break;
3077  case IOSTAT_LOCAL_REJECT:
3078  switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
3079  case IOERR_LOOP_OPEN_FAILURE:
3080  if (cmd == ELS_CMD_FLOGI) {
3081  if (PCI_DEVICE_ID_HORNET ==
3082  phba->pcidev->device) {
3083  phba->fc_topology = LPFC_TOPOLOGY_LOOP;
3084  phba->pport->fc_myDID = 0;
3085  phba->alpa_map[0] = 0;
3086  phba->alpa_map[1] = 0;
3087  }
3088  }
3089  if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
3090  delay = 1000;
3091  retry = 1;
3092  break;
3093 
3094  case IOERR_ILLEGAL_COMMAND:
3095  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3096  "0124 Retry illegal cmd x%x "
3097  "retry:x%x delay:x%x\n",
3098  cmd, cmdiocb->retry, delay);
3099  retry = 1;
3100  /* All commands' retry policy */
3101  maxretry = 8;
3102  if (cmdiocb->retry > 2)
3103  delay = 1000;
3104  break;
3105 
3106  case IOERR_NO_RESOURCES:
3107  logerr = 1; /* HBA out of resources */
3108  retry = 1;
3109  if (cmdiocb->retry > 100)
3110  delay = 100;
3111  maxretry = 250;
3112  break;
3113 
3114  case IOERR_ILLEGAL_FRAME:
3115  delay = 100;
3116  retry = 1;
3117  break;
3118 
3120  case IOERR_INVALID_RPI:
3121  retry = 1;
3122  break;
3123  }
3124  break;
3125 
3126  case IOSTAT_NPORT_RJT:
3127  case IOSTAT_FABRIC_RJT:
3128  if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3129  retry = 1;
3130  break;
3131  }
3132  break;
3133 
3134  case IOSTAT_NPORT_BSY:
3135  case IOSTAT_FABRIC_BSY:
3136  logerr = 1; /* Fabric / Remote NPort out of resources */
3137  retry = 1;
3138  break;
3139 
3140  case IOSTAT_LS_RJT:
3141  stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
3142  /* Added for Vendor specific support
3143  * Just keep retrying for these Rsn / Exp codes
3144  */
3145  switch (stat.un.b.lsRjtRsnCode) {
3146  case LSRJT_UNABLE_TPC:
3147  if (stat.un.b.lsRjtRsnCodeExp ==
3148  LSEXP_CMD_IN_PROGRESS) {
3149  if (cmd == ELS_CMD_PLOGI) {
3150  delay = 1000;
3151  maxretry = 48;
3152  }
3153  retry = 1;
3154  break;
3155  }
3156  if (stat.un.b.lsRjtRsnCodeExp ==
3157  LSEXP_CANT_GIVE_DATA) {
3158  if (cmd == ELS_CMD_PLOGI) {
3159  delay = 1000;
3160  maxretry = 48;
3161  }
3162  retry = 1;
3163  break;
3164  }
3165  if ((cmd == ELS_CMD_PLOGI) ||
3166  (cmd == ELS_CMD_PRLI)) {
3167  delay = 1000;
3168  maxretry = lpfc_max_els_tries + 1;
3169  retry = 1;
3170  break;
3171  }
3172  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3173  (cmd == ELS_CMD_FDISC) &&
3174  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
3175  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3176  "0125 FDISC Failed (x%x). "
3177  "Fabric out of resources\n",
3178  stat.un.lsRjtError);
3179  lpfc_vport_set_state(vport,
3180  FC_VPORT_NO_FABRIC_RSCS);
3181  }
3182  break;
3183 
3184  case LSRJT_LOGICAL_BSY:
3185  if ((cmd == ELS_CMD_PLOGI) ||
3186  (cmd == ELS_CMD_PRLI)) {
3187  delay = 1000;
3188  maxretry = 48;
3189  } else if (cmd == ELS_CMD_FDISC) {
3190  /* FDISC retry policy */
3191  maxretry = 48;
3192  if (cmdiocb->retry >= 32)
3193  delay = 1000;
3194  }
3195  retry = 1;
3196  break;
3197 
3198  case LSRJT_LOGICAL_ERR:
3199  /* There are some cases where switches return this
3200  * error when they are not ready and should be returning
3201  * Logical Busy. We should delay every time.
3202  */
3203  if (cmd == ELS_CMD_FDISC &&
3204  stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3205  maxretry = 3;
3206  delay = 1000;
3207  retry = 1;
3208  break;
3209  }
3210  case LSRJT_PROTOCOL_ERR:
3211  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3212  (cmd == ELS_CMD_FDISC) &&
3213  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3214  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3215  ) {
3216  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3217  "0122 FDISC Failed (x%x). "
3218  "Fabric Detected Bad WWN\n",
3219  stat.un.lsRjtError);
3220  lpfc_vport_set_state(vport,
3221  FC_VPORT_FABRIC_REJ_WWN);
3222  }
3223  break;
3224  }
3225  break;
3226 
3227  case IOSTAT_INTERMED_RSP:
3228  case IOSTAT_BA_RJT:
3229  break;
3230 
3231  default:
3232  break;
3233  }
3234 
3235  if (did == FDMI_DID)
3236  retry = 1;
3237 
3238  if ((cmd == ELS_CMD_FLOGI) &&
3239  (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
3240  !lpfc_error_lost_link(irsp)) {
3241  /* FLOGI retry policy */
3242  retry = 1;
3243  /* retry FLOGI forever */
3244  maxretry = 0;
3245  if (cmdiocb->retry >= 100)
3246  delay = 5000;
3247  else if (cmdiocb->retry >= 32)
3248  delay = 1000;
3249  } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3250  /* retry FDISCs every second up to devloss */
3251  retry = 1;
3252  maxretry = vport->cfg_devloss_tmo;
3253  delay = 1000;
3254  }
3255 
3256  cmdiocb->retry++;
3257  if (maxretry && (cmdiocb->retry >= maxretry)) {
3258  phba->fc_stat.elsRetryExceeded++;
3259  retry = 0;
3260  }
3261 
3262  if ((vport->load_flag & FC_UNLOADING) != 0)
3263  retry = 0;
3264 
3265  if (retry) {
3266  if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3267  /* Stop retrying PLOGI and FDISC if in FCF discovery */
3268  if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3269  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3270  "2849 Stop retry ELS command "
3271  "x%x to remote NPORT x%x, "
3272  "Data: x%x x%x\n", cmd, did,
3273  cmdiocb->retry, delay);
3274  return 0;
3275  }
3276  }
3277 
3278  /* Retry ELS command <elsCmd> to remote NPORT <did> */
3279  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3280  "0107 Retry ELS command x%x to remote "
3281  "NPORT x%x Data: x%x x%x\n",
3282  cmd, did, cmdiocb->retry, delay);
3283 
3284  if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3285  ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3286  ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3287  IOERR_NO_RESOURCES))) {
3288  /* Don't reset timer for no resources */
3289 
3290  /* If discovery / RSCN timer is running, reset it */
3291  if (timer_pending(&vport->fc_disctmo) ||
3292  (vport->fc_flag & FC_RSCN_MODE))
3293  lpfc_set_disctmo(vport);
3294  }
3295 
3296  phba->fc_stat.elsXmitRetry++;
3297  if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
3298  phba->fc_stat.elsDelayRetry++;
3299  ndlp->nlp_retry = cmdiocb->retry;
3300 
3301  /* delay is specified in milliseconds */
3302  mod_timer(&ndlp->nlp_delayfunc,
3303  jiffies + msecs_to_jiffies(delay));
3304  spin_lock_irq(shost->host_lock);
3305  ndlp->nlp_flag |= NLP_DELAY_TMO;
3306  spin_unlock_irq(shost->host_lock);
3307 
3308  ndlp->nlp_prev_state = ndlp->nlp_state;
3309  if (cmd == ELS_CMD_PRLI)
3310  lpfc_nlp_set_state(vport, ndlp,
3311  NLP_STE_PRLI_ISSUE);
3312  else
3313  lpfc_nlp_set_state(vport, ndlp,
3314  NLP_STE_NPR_NODE);
3315  ndlp->nlp_last_elscmd = cmd;
3316 
3317  return 1;
3318  }
3319  switch (cmd) {
3320  case ELS_CMD_FLOGI:
3321  lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
3322  return 1;
3323  case ELS_CMD_FDISC:
3324  lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3325  return 1;
3326  case ELS_CMD_PLOGI:
3327  if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3328  ndlp->nlp_prev_state = ndlp->nlp_state;
3329  lpfc_nlp_set_state(vport, ndlp,
3330  NLP_STE_PLOGI_ISSUE);
3331  }
3332  lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
3333  return 1;
3334  case ELS_CMD_ADISC:
3335  ndlp->nlp_prev_state = ndlp->nlp_state;
3336  lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3337  lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
3338  return 1;
3339  case ELS_CMD_PRLI:
3340  ndlp->nlp_prev_state = ndlp->nlp_state;
3341  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3342  lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
3343  return 1;
3344  case ELS_CMD_LOGO:
3345  ndlp->nlp_prev_state = ndlp->nlp_state;
3346  lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3347  lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3348  return 1;
3349  }
3350  }
3351  /* No retry ELS command <elsCmd> to remote NPORT <did> */
3352  if (logerr) {
3353  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3354  "0137 No retry ELS command x%x to remote "
3355  "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3356  cmd, did, irsp->ulpStatus,
3357  irsp->un.ulpWord[4]);
3358  }
3359  else {
3360  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3361  "0108 No retry ELS command x%x to remote "
3362  "NPORT x%x Retried:%d Error:x%x/%x\n",
3363  cmd, did, cmdiocb->retry, irsp->ulpStatus,
3364  irsp->un.ulpWord[4]);
3365  }
3366  return 0;
3367 }
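/*
 * Rough shape of the retry policy implemented above: FLOGI on a fabric is
 * retried indefinitely (1s pacing after 32 tries, 5s after 100), FDISC is
 * retried once a second up to the devloss timeout, PLOGI/PRLI rejected with
 * LS_RJT "logical busy" back off 1s for up to 48 tries, and HBA
 * out-of-resource errors allow up to 250 tries.  As a worked example, a
 * PLOGI hitting LSRJT_LOGICAL_BSY is re-queued through nlp_delayfunc with a
 * 1000 ms delay each time, so the node is given roughly 48 seconds before
 * the command is finally failed and logged as "No retry ELS command".
 */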
3368 
3383 static int
3384 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3385 {
3386  struct lpfc_dmabuf *buf_ptr;
3387 
3388  /* Free the response before processing the command. */
3389  if (!list_empty(&buf_ptr1->list)) {
3390  list_remove_head(&buf_ptr1->list, buf_ptr,
3391  struct lpfc_dmabuf,
3392  list);
3393  lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3394  kfree(buf_ptr);
3395  }
3396  lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3397  kfree(buf_ptr1);
3398  return 0;
3399 }
3400 
3413 static int
3414 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3415 {
3416  lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3417  kfree(buf_ptr);
3418  return 0;
3419 }
3420 
3448 int
3449 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3450 {
3451  struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3452  struct lpfc_nodelist *ndlp;
3453 
3454  ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3455  if (ndlp) {
3456  if (ndlp->nlp_flag & NLP_DEFER_RM) {
3457  lpfc_nlp_put(ndlp);
3458 
3459  /* If the ndlp is not being used by another discovery
3460  * thread, free it.
3461  */
3462  if (!lpfc_nlp_not_used(ndlp)) {
3463  /* If ndlp is being used by another discovery
3464  * thread, just clear NLP_DEFER_RM
3465  */
3466  ndlp->nlp_flag &= ~NLP_DEFER_RM;
3467  }
3468  }
3469  else
3470  lpfc_nlp_put(ndlp);
3471  elsiocb->context1 = NULL;
3472  }
3473  /* context2 = cmd, context2->next = rsp, context3 = bpl */
3474  if (elsiocb->context2) {
3475  if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3476  /* Firmware could still be in the process of DMAing the
3477  * payload, so don't free the data buffer until after
3478  * a heartbeat.
3479  */
3480  elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3481  buf_ptr = elsiocb->context2;
3482  elsiocb->context2 = NULL;
3483  if (buf_ptr) {
3484  buf_ptr1 = NULL;
3485  spin_lock_irq(&phba->hbalock);
3486  if (!list_empty(&buf_ptr->list)) {
3487  list_remove_head(&buf_ptr->list,
3488  buf_ptr1, struct lpfc_dmabuf,
3489  list);
3490  INIT_LIST_HEAD(&buf_ptr1->list);
3491  list_add_tail(&buf_ptr1->list,
3492  &phba->elsbuf);
3493  phba->elsbuf_cnt++;
3494  }
3495  INIT_LIST_HEAD(&buf_ptr->list);
3496  list_add_tail(&buf_ptr->list, &phba->elsbuf);
3497  phba->elsbuf_cnt++;
3498  spin_unlock_irq(&phba->hbalock);
3499  }
3500  } else {
3501  buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3502  lpfc_els_free_data(phba, buf_ptr1);
3503  }
3504  }
3505 
3506  if (elsiocb->context3) {
3507  buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3508  lpfc_els_free_bpl(phba, buf_ptr);
3509  }
3510  lpfc_sli_release_iocbq(phba, elsiocb);
3511  return 0;
3512 }
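/*
 * Buffer layout assumed above (set up when the ELS IOCB was prepared):
 * context2 is the command payload DMA buffer with the response payload
 * buffer chained on context2->list, and context3 is the buffer pointer list
 * (BPL) the hardware walks.  lpfc_els_free_data() therefore frees the
 * response first and then the command buffer, lpfc_els_free_bpl() releases
 * the BPL, and the LPFC_DELAY_MEM_FREE case parks the payload buffers on
 * phba->elsbuf so any in-flight DMA can quiesce before they are reused.
 */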
3513 
3531 static void
3532 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3533  struct lpfc_iocbq *rspiocb)
3534 {
3535  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3536  struct lpfc_vport *vport = cmdiocb->vport;
3537  IOCB_t *irsp;
3538 
3539  irsp = &rspiocb->iocb;
3541  "ACC LOGO cmpl: status:x%x/x%x did:x%x",
3542  irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3543  /* ACC to LOGO completes to NPort <nlp_DID> */
3544  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3545  "0109 ACC to LOGO completes to NPort x%x "
3546  "Data: x%x x%x x%x\n",
3547  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3548  ndlp->nlp_rpi);
3549 
3550  if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3551  /* NPort Recovery mode or node is just allocated */
3552  if (!lpfc_nlp_not_used(ndlp)) {
3553  /* If the ndlp is being used by another discovery
3554  * thread, just unregister the RPI.
3555  */
3556  lpfc_unreg_rpi(vport, ndlp);
3557  } else {
3558  /* Indicate the node has already been released; do
3559  * not reference it from within lpfc_els_free_iocb.
3560  */
3561  cmdiocb->context1 = NULL;
3562  }
3563  }
3564 
3565  /*
3566  * The driver received a LOGO from the rport and has ACK'd it.
3567  * At this point, the driver is done so release the IOCB
3568  */
3569  lpfc_els_free_iocb(phba, cmdiocb);
3570 
3571  /*
3572  * Remove the ndlp reference if it's a fabric node that has
3573  * sent us an unsolicited LOGO.
3574  */
3575  if (ndlp->nlp_type & NLP_FABRIC)
3576  lpfc_nlp_put(ndlp);
3577 
3578  return;
3579 }
3580 
3594 void
3595 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3596 {
3597  struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3598  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3599 
3600  pmb->context1 = NULL;
3601  pmb->context2 = NULL;
3602 
3603  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3604  kfree(mp);
3605  mempool_free(pmb, phba->mbox_mem_pool);
3606  if (ndlp) {
3607  if (NLP_CHK_NODE_ACT(ndlp)) {
3608  lpfc_nlp_put(ndlp);
3609  /* This is the end of the default RPI cleanup logic for
3610  * this ndlp. If no other discovery threads are using
3611  * this ndlp, free all resources associated with it.
3612  */
3613  lpfc_nlp_not_used(ndlp);
3614  } else {
3615  lpfc_drop_node(ndlp->vport, ndlp);
3616  }
3617  }
3618 
3619  return;
3620 }
3621 
3638 static void
3639 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3640  struct lpfc_iocbq *rspiocb)
3641 {
3642  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3643  struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3644  struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3645  IOCB_t *irsp;
3646  uint8_t *pcmd;
3647  LPFC_MBOXQ_t *mbox = NULL;
3648  struct lpfc_dmabuf *mp = NULL;
3649  uint32_t ls_rjt = 0;
3650 
3651  irsp = &rspiocb->iocb;
3652 
3653  if (cmdiocb->context_un.mbox)
3654  mbox = cmdiocb->context_un.mbox;
3655 
3656  /* First determine if this is a LS_RJT cmpl. Note, this callback
3657  * function can have cmdiocb->context1 (ndlp) field set to NULL.
3658  */
3659  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3660  if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3661  (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3662  /* A LS_RJT associated with Default RPI cleanup has its own
3663  * separate code path.
3664  */
3665  if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3666  ls_rjt = 1;
3667  }
3668 
3669  /* Check to see if link went down during discovery */
3670  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3671  if (mbox) {
3672  mp = (struct lpfc_dmabuf *) mbox->context1;
3673  if (mp) {
3674  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3675  kfree(mp);
3676  }
3677  mempool_free(mbox, phba->mbox_mem_pool);
3678  }
3679  if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3680  (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3681  if (lpfc_nlp_not_used(ndlp)) {
3682  ndlp = NULL;
3683  /* Indicate the node has already been released;
3684  * do not reference it from within
3685  * the routine lpfc_els_free_iocb.
3686  */
3687  cmdiocb->context1 = NULL;
3688  }
3689  goto out;
3690  }
3691 
3693  "ELS rsp cmpl: status:x%x/x%x did:x%x",
3694  irsp->ulpStatus, irsp->un.ulpWord[4],
3695  cmdiocb->iocb.un.elsreq64.remoteID);
3696  /* ELS response tag <ulpIoTag> completes */
3697  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3698  "0110 ELS response tag x%x completes "
3699  "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3700  cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3701  rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3702  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3703  ndlp->nlp_rpi);
3704  if (mbox) {
3705  if ((rspiocb->iocb.ulpStatus == 0)
3706  && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3707  lpfc_unreg_rpi(vport, ndlp);
3708  /* Increment reference count to ndlp to hold the
3709  * reference to ndlp for the callback function.
3710  */
3711  mbox->context2 = lpfc_nlp_get(ndlp);
3712  mbox->vport = vport;
3713  if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3714  mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3715  mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3716  }
3717  else {
3719  ndlp->nlp_prev_state = ndlp->nlp_state;
3720  lpfc_nlp_set_state(vport, ndlp,
3721  NLP_STE_REG_LOGIN_ISSUE);
3722  }
3723  if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3724  != MBX_NOT_FINISHED)
3725  goto out;
3726  else
3727  /* Decrement the ndlp reference count we
3728  * set for this failed mailbox command.
3729  */
3730  lpfc_nlp_put(ndlp);
3731 
3732  /* ELS rsp: Cannot issue reg_login for <NPortid> */
3733  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3734  "0138 ELS rsp: Cannot issue reg_login for x%x "
3735  "Data: x%x x%x x%x\n",
3736  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3737  ndlp->nlp_rpi);
3738 
3739  if (lpfc_nlp_not_used(ndlp)) {
3740  ndlp = NULL;
3741  /* Indicate the node has already been released;
3742  * do not reference it from within
3743  * the routine lpfc_els_free_iocb.
3744  */
3745  cmdiocb->context1 = NULL;
3746  }
3747  } else {
3748  /* Do not drop node for lpfc_els_abort'ed ELS cmds */
3749  if (!lpfc_error_lost_link(irsp) &&
3750  ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3751  if (lpfc_nlp_not_used(ndlp)) {
3752  ndlp = NULL;
3753  /* Indicate the node has already been
3754  * released; do not reference
3755  * it from within the routine
3756  * lpfc_els_free_iocb.
3757  */
3758  cmdiocb->context1 = NULL;
3759  }
3760  }
3761  }
3762  mp = (struct lpfc_dmabuf *) mbox->context1;
3763  if (mp) {
3764  lpfc_mbuf_free(phba, mp->virt, mp->phys);
3765  kfree(mp);
3766  }
3767  mempool_free(mbox, phba->mbox_mem_pool);
3768  }
3769 out:
3770  if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3771  spin_lock_irq(shost->host_lock);
3772  ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
3773  spin_unlock_irq(shost->host_lock);
3774 
3775  /* If the node is not being used by another discovery thread,
3776  * and we are sending a reject, we are done with it.
3777  * Release driver reference count here and free associated
3778  * resources.
3779  */
3780  if (ls_rjt)
3781  if (lpfc_nlp_not_used(ndlp))
3782  /* Indicate the node has already been released;
3783  * do not reference it from within
3784  * the routine lpfc_els_free_iocb.
3785  */
3786  cmdiocb->context1 = NULL;
3787  }
3788 
3789  lpfc_els_free_iocb(phba, cmdiocb);
3790  return;
3791 }
3792 
3818 int
3819 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3820  struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3821  LPFC_MBOXQ_t *mbox)
3822 {
3823  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3824  struct lpfc_hba *phba = vport->phba;
3825  IOCB_t *icmd;
3826  IOCB_t *oldcmd;
3827  struct lpfc_iocbq *elsiocb;
3828  struct lpfc_sli *psli;
3829  uint8_t *pcmd;
3830  uint16_t cmdsize;
3831  int rc;
3832  ELS_PKT *els_pkt_ptr;
3833 
3834  psli = &phba->sli;
3835  oldcmd = &oldiocb->iocb;
3836 
3837  switch (flag) {
3838  case ELS_CMD_ACC:
3839  cmdsize = sizeof(uint32_t);
3840  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3841  ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3842  if (!elsiocb) {
3843  spin_lock_irq(shost->host_lock);
3844  ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3845  spin_unlock_irq(shost->host_lock);
3846  return 1;
3847  }
3848 
3849  icmd = &elsiocb->iocb;
3850  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3851  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3852  pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3853  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3854  pcmd += sizeof(uint32_t);
3855 
3857  "Issue ACC: did:x%x flg:x%x",
3858  ndlp->nlp_DID, ndlp->nlp_flag, 0);
3859  break;
3860  case ELS_CMD_PLOGI:
3861  cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
3862  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3863  ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3864  if (!elsiocb)
3865  return 1;
3866 
3867  icmd = &elsiocb->iocb;
3868  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3869  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3870  pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3871 
3872  if (mbox)
3873  elsiocb->context_un.mbox = mbox;
3874 
3875  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3876  pcmd += sizeof(uint32_t);
3877  memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
3878 
3880  "Issue ACC PLOGI: did:x%x flg:x%x",
3881  ndlp->nlp_DID, ndlp->nlp_flag, 0);
3882  break;
3883  case ELS_CMD_PRLO:
3884  cmdsize = sizeof(uint32_t) + sizeof(PRLO);
3885  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3886  ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3887  if (!elsiocb)
3888  return 1;
3889 
3890  icmd = &elsiocb->iocb;
3891  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3892  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3893  pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3894 
3895  memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
3896  sizeof(uint32_t) + sizeof(PRLO));
3897  *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3898  els_pkt_ptr = (ELS_PKT *) pcmd;
3899  els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
3900 
3902  "Issue ACC PRLO: did:x%x flg:x%x",
3903  ndlp->nlp_DID, ndlp->nlp_flag, 0);
3904  break;
3905  default:
3906  return 1;
3907  }
3908  /* Xmit ELS ACC response tag <ulpIoTag> */
3909  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3910  "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3911  "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
3912  "fc_flag x%x\n",
3913  elsiocb->iotag, elsiocb->iocb.ulpContext,
3914  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3915  ndlp->nlp_rpi, vport->fc_flag);
3916  if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3917  spin_lock_irq(shost->host_lock);
3918  ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3919  spin_unlock_irq(shost->host_lock);
3920  elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3921  } else {
3922  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3923  }
3924 
3925  phba->fc_stat.elsXmitACC++;
3926  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3927  if (rc == IOCB_ERROR) {
3928  lpfc_els_free_iocb(phba, elsiocb);
3929  return 1;
3930  }
3931  return 0;
3932 }
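/*
 * Usage sketch for the routine above (illustrative only; it assumes a caller
 * context that already holds vport, the received cmdiocb, the ndlp and, for
 * the PLOGI case, a prepared REG_LOGIN mailbox).  A plain ACC needs no
 * mailbox, while a PLOGI ACC may carry one so the RPI is registered once the
 * accept completes (see lpfc_cmpl_els_rsp()).
 */
#if 0
	/* accept a received request as-is */
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	/* accept a received PLOGI, returning service parameters and a
	 * caller-prepared REG_LOGIN mailbox
	 */
	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
#endif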
3933 
3956 int
3957 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3958  struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3959  LPFC_MBOXQ_t *mbox)
3960 {
3961  struct lpfc_hba *phba = vport->phba;
3962  IOCB_t *icmd;
3963  IOCB_t *oldcmd;
3964  struct lpfc_iocbq *elsiocb;
3965  struct lpfc_sli *psli;
3966  uint8_t *pcmd;
3967  uint16_t cmdsize;
3968  int rc;
3969 
3970  psli = &phba->sli;
3971  cmdsize = 2 * sizeof(uint32_t);
3972  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3973  ndlp->nlp_DID, ELS_CMD_LS_RJT);
3974  if (!elsiocb)
3975  return 1;
3976 
3977  icmd = &elsiocb->iocb;
3978  oldcmd = &oldiocb->iocb;
3979  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3980  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3981  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3982 
3983  *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
3984  pcmd += sizeof(uint32_t);
3985  *((uint32_t *) (pcmd)) = rejectError;
3986 
3987  if (mbox)
3988  elsiocb->context_un.mbox = mbox;
3989 
3990  /* Xmit ELS RJT <err> response tag <ulpIoTag> */
3991  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3992  "0129 Xmit ELS RJT x%x response tag x%x "
3993  "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3994  "rpi x%x\n",
3995  rejectError, elsiocb->iotag,
3996  elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3997  ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
3999  "Issue LS_RJT: did:x%x flg:x%x err:x%x",
4000  ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4001 
4002  phba->fc_stat.elsXmitLSRJT++;
4003  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4004  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4005 
4006  if (rc == IOCB_ERROR) {
4007  lpfc_els_free_iocb(phba, elsiocb);
4008  return 1;
4009  }
4010  return 0;
4011 }
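/*
 * The LS_RJT built above is just two words: ELS_CMD_LS_RJT followed by the
 * reject status word passed in rejectError, which packs the LSRJT_* reason
 * code, the LSEXP_* reason-code explanation and a vendor-unique byte; it is
 * the same word lpfc_els_retry() decodes on the initiator side via
 * stat.un.lsRjtError.
 */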
4012 
4032 int
4033 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4034  struct lpfc_nodelist *ndlp)
4035 {
4036  struct lpfc_hba *phba = vport->phba;
4037  ADISC *ap;
4038  IOCB_t *icmd, *oldcmd;
4039  struct lpfc_iocbq *elsiocb;
4040  uint8_t *pcmd;
4041  uint16_t cmdsize;
4042  int rc;
4043 
4044  cmdsize = sizeof(uint32_t) + sizeof(ADISC);
4045  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4046  ndlp->nlp_DID, ELS_CMD_ACC);
4047  if (!elsiocb)
4048  return 1;
4049 
4050  icmd = &elsiocb->iocb;
4051  oldcmd = &oldiocb->iocb;
4052  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4053  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4054 
4055  /* Xmit ADISC ACC response tag <ulpIoTag> */
4056  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4057  "0130 Xmit ADISC ACC response iotag x%x xri: "
4058  "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4059  elsiocb->iotag, elsiocb->iocb.ulpContext,
4060  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4061  ndlp->nlp_rpi);
4062  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4063 
4064  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4065  pcmd += sizeof(uint32_t);
4066 
4067  ap = (ADISC *) (pcmd);
4068  ap->hardAL_PA = phba->fc_pref_ALPA;
4069  memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4070  memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4071  ap->DID = be32_to_cpu(vport->fc_myDID);
4072 
4074  "Issue ACC ADISC: did:x%x flg:x%x",
4075  ndlp->nlp_DID, ndlp->nlp_flag, 0);
4076 
4077  phba->fc_stat.elsXmitACC++;
4078  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4079  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4080  if (rc == IOCB_ERROR) {
4081  lpfc_els_free_iocb(phba, elsiocb);
4082  return 1;
4083  }
4084  return 0;
4085 }
4086 
4106 int
4107 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4108  struct lpfc_nodelist *ndlp)
4109 {
4110  struct lpfc_hba *phba = vport->phba;
4111  PRLI *npr;
4112  lpfc_vpd_t *vpd;
4113  IOCB_t *icmd;
4114  IOCB_t *oldcmd;
4115  struct lpfc_iocbq *elsiocb;
4116  struct lpfc_sli *psli;
4117  uint8_t *pcmd;
4118  uint16_t cmdsize;
4119  int rc;
4120 
4121  psli = &phba->sli;
4122 
4123  cmdsize = sizeof(uint32_t) + sizeof(PRLI);
4124  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4125  ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
4126  if (!elsiocb)
4127  return 1;
4128 
4129  icmd = &elsiocb->iocb;
4130  oldcmd = &oldiocb->iocb;
4131  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4132  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4133 
4134  /* Xmit PRLI ACC response tag <ulpIoTag> */
4135  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4136  "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4137  "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4138  elsiocb->iotag, elsiocb->iocb.ulpContext,
4139  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4140  ndlp->nlp_rpi);
4141  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4142 
4143  *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
4144  pcmd += sizeof(uint32_t);
4145 
4146  /* For PRLI, remainder of payload is PRLI parameter page */
4147  memset(pcmd, 0, sizeof(PRLI));
4148 
4149  npr = (PRLI *) pcmd;
4150  vpd = &phba->vpd;
4151  /*
4152  * If the remote port is a target and our firmware version is 3.20 or
4153  * later, set the following bits for FC-TAPE support.
4154  */
4155  if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4156  (vpd->rev.feaLevelHigh >= 0x02)) {
4157  npr->ConfmComplAllowed = 1;
4158  npr->Retry = 1;
4159  npr->TaskRetryIdReq = 1;
4160  }
4161 
4162  npr->acceptRspCode = PRLI_REQ_EXECUTED;
4163  npr->estabImagePair = 1;
4164  npr->readXferRdyDis = 1;
4165  npr->ConfmComplAllowed = 1;
4166 
4167  npr->prliType = PRLI_FCP_TYPE;
4168  npr->initiatorFunc = 1;
4169 
4171  "Issue ACC PRLI: did:x%x flg:x%x",
4172  ndlp->nlp_DID, ndlp->nlp_flag, 0);
4173 
4174  phba->fc_stat.elsXmitACC++;
4175  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4176 
4177  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4178  if (rc == IOCB_ERROR) {
4179  lpfc_els_free_iocb(phba, elsiocb);
4180  return 1;
4181  }
4182  return 0;
4183 }
4184 
4211 static int
4212 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4213  struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4214 {
4215  struct lpfc_hba *phba = vport->phba;
4216  RNID *rn;
4217  IOCB_t *icmd, *oldcmd;
4218  struct lpfc_iocbq *elsiocb;
4219  struct lpfc_sli *psli;
4220  uint8_t *pcmd;
4221  uint16_t cmdsize;
4222  int rc;
4223 
4224  psli = &phba->sli;
4225  cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4226  + (2 * sizeof(struct lpfc_name));
4227  if (format)
4228  cmdsize += sizeof(RNID_TOP_DISC);
4229 
4230  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4231  ndlp->nlp_DID, ELS_CMD_ACC);
4232  if (!elsiocb)
4233  return 1;
4234 
4235  icmd = &elsiocb->iocb;
4236  oldcmd = &oldiocb->iocb;
4237  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4238  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4239 
4240  /* Xmit RNID ACC response tag <ulpIoTag> */
4241  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4242  "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4243  elsiocb->iotag, elsiocb->iocb.ulpContext);
4244  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4245  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4246  pcmd += sizeof(uint32_t);
4247 
4248  memset(pcmd, 0, sizeof(RNID));
4249  rn = (RNID *) (pcmd);
4250  rn->Format = format;
4251  rn->CommonLen = (2 * sizeof(struct lpfc_name));
4252  memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4253  memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4254  switch (format) {
4255  case 0:
4256  rn->SpecificLen = 0;
4257  break;
4258  case RNID_TOPOLOGY_DISC:
4259  rn->SpecificLen = sizeof(RNID_TOP_DISC);
4260  memcpy(&rn->un.topologyDisc.portName,
4261  &vport->fc_portname, sizeof(struct lpfc_name));
4263  rn->un.topologyDisc.physPort = 0;
4264  rn->un.topologyDisc.attachedNodes = 0;
4265  break;
4266  default:
4267  rn->CommonLen = 0;
4268  rn->SpecificLen = 0;
4269  break;
4270  }
4271 
4273  "Issue ACC RNID: did:x%x flg:x%x",
4274  ndlp->nlp_DID, ndlp->nlp_flag, 0);
4275 
4276  phba->fc_stat.elsXmitACC++;
4277  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4278 
4279  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4280  if (rc == IOCB_ERROR) {
4281  lpfc_els_free_iocb(phba, elsiocb);
4282  return 1;
4283  }
4284  return 0;
4285 }
4286 
4295 static void
4296 lpfc_els_clear_rrq(struct lpfc_vport *vport,
4297  struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4298 {
4299  struct lpfc_hba *phba = vport->phba;
4300  uint8_t *pcmd;
4301  struct RRQ *rrq;
4302  uint16_t rxid;
4303  uint16_t xri;
4304  struct lpfc_node_rrq *prrq;
4305 
4306 
4307  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4308  pcmd += sizeof(uint32_t);
4309  rrq = (struct RRQ *)pcmd;
4310  rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
4311  rxid = bf_get(rrq_rxid, rrq);
4312 
4313  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4314  "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4315  " x%x x%x\n",
4316  be32_to_cpu(bf_get(rrq_did, rrq)),
4317  bf_get(rrq_oxid, rrq),
4318  rxid,
4319  iocb->iotag, iocb->iocb.ulpContext);
4320 
4322  "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
4323  ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
4324  if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
4325  xri = bf_get(rrq_oxid, rrq);
4326  else
4327  xri = rxid;
4328  prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
4329  if (prrq)
4330  lpfc_clr_rrq_active(phba, xri, prrq);
4331  return;
4332 }
4333 
4345 static int
4346 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4347  struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4348 {
4349  struct lpfc_hba *phba = vport->phba;
4350  struct lpfc_iocbq *elsiocb;
4351  struct lpfc_sli *psli;
4352  uint8_t *pcmd;
4353  uint16_t cmdsize;
4354  int rc;
4355 
4356  psli = &phba->sli;
4357  cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4358 
4359  /* The accumulated length can exceed the BPL_SIZE. For
4360  * now, use this as the limit
4361  */
4362  if (cmdsize > LPFC_BPL_SIZE)
4363  cmdsize = LPFC_BPL_SIZE;
4364  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4365  ndlp->nlp_DID, ELS_CMD_ACC);
4366  if (!elsiocb)
4367  return 1;
4368 
4369  elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4370  elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4371 
4372  /* Xmit ECHO ACC response tag <ulpIoTag> */
4373  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4374  "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4375  elsiocb->iotag, elsiocb->iocb.ulpContext);
4376  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4377  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4378  pcmd += sizeof(uint32_t);
4379  memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4380 
4382  "Issue ACC ECHO: did:x%x flg:x%x",
4383  ndlp->nlp_DID, ndlp->nlp_flag, 0);
4384 
4385  phba->fc_stat.elsXmitACC++;
4386  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4387 
4388  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4389  if (rc == IOCB_ERROR) {
4390  lpfc_els_free_iocb(phba, elsiocb);
4391  return 1;
4392  }
4393  return 0;
4394 }
4395 
4415 int
4416 lpfc_els_disc_adisc(struct lpfc_vport *vport)
4417 {
4418  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4419  struct lpfc_nodelist *ndlp, *next_ndlp;
4420  int sentadisc = 0;
4421 
4422  /* go thru NPR nodes and issue any remaining ELS ADISCs */
4423  list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4424  if (!NLP_CHK_NODE_ACT(ndlp))
4425  continue;
4426  if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4427  (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4428  (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
4429  spin_lock_irq(shost->host_lock);
4430  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4431  spin_unlock_irq(shost->host_lock);
4432  ndlp->nlp_prev_state = ndlp->nlp_state;
4433  lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4434  lpfc_issue_els_adisc(vport, ndlp, 0);
4435  sentadisc++;
4436  vport->num_disc_nodes++;
4437  if (vport->num_disc_nodes >=
4438  vport->cfg_discovery_threads) {
4439  spin_lock_irq(shost->host_lock);
4440  vport->fc_flag |= FC_NLP_MORE;
4441  spin_unlock_irq(shost->host_lock);
4442  break;
4443  }
4444  }
4445  }
4446  if (sentadisc == 0) {
4447  spin_lock_irq(shost->host_lock);
4448  vport->fc_flag &= ~FC_NLP_MORE;
4449  spin_unlock_irq(shost->host_lock);
4450  }
4451  return sentadisc;
4452 }
4453 
4473 int
4474 lpfc_els_disc_plogi(struct lpfc_vport *vport)
4475 {
4476  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4477  struct lpfc_nodelist *ndlp, *next_ndlp;
4478  int sentplogi = 0;
4479 
4480  /* go thru NPR nodes and issue any remaining ELS PLOGIs */
4481  list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4482  if (!NLP_CHK_NODE_ACT(ndlp))
4483  continue;
4484  if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4485  (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4486  (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4487  (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4488  ndlp->nlp_prev_state = ndlp->nlp_state;
4489  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4490  lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4491  sentplogi++;
4492  vport->num_disc_nodes++;
4493  if (vport->num_disc_nodes >=
4494  vport->cfg_discovery_threads) {
4495  spin_lock_irq(shost->host_lock);
4496  vport->fc_flag |= FC_NLP_MORE;
4497  spin_unlock_irq(shost->host_lock);
4498  break;
4499  }
4500  }
4501  }
4502  if (sentplogi) {
4503  lpfc_set_disctmo(vport);
4504  }
4505  else {
4506  spin_lock_irq(shost->host_lock);
4507  vport->fc_flag &= ~FC_NLP_MORE;
4508  spin_unlock_irq(shost->host_lock);
4509  }
4510  return sentplogi;
4511 }
4512 
4522 void
4523 lpfc_els_flush_rscn(struct lpfc_vport *vport)
4524 {
4525  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4526  struct lpfc_hba *phba = vport->phba;
4527  int i;
4528 
4529  spin_lock_irq(shost->host_lock);
4530  if (vport->fc_rscn_flush) {
4531  /* Another thread is walking fc_rscn_id_list on this vport */
4532  spin_unlock_irq(shost->host_lock);
4533  return;
4534  }
4535  /* Indicate we are walking lpfc_els_flush_rscn on this vport */
4536  vport->fc_rscn_flush = 1;
4537  spin_unlock_irq(shost->host_lock);
4538 
4539  for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4540  lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
4541  vport->fc_rscn_id_list[i] = NULL;
4542  }
4543  spin_lock_irq(shost->host_lock);
4544  vport->fc_rscn_id_cnt = 0;
4545  vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
4546  spin_unlock_irq(shost->host_lock);
4547  lpfc_can_disctmo(vport);
4548  /* Indicate we are done walking this fc_rscn_id_list */
4549  vport->fc_rscn_flush = 0;
4550 }
4551 
4564 int
4565 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
4566 {
4567  D_ID ns_did;
4568  D_ID rscn_did;
4569  uint32_t *lp;
4570  uint32_t payload_len, i;
4571  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4572 
4573  ns_did.un.word = did;
4574 
4575  /* Never match fabric nodes for RSCNs */
4576  if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4577  return 0;
4578 
4579  /* If we are doing a FULL RSCN rediscovery, match everything */
4580  if (vport->fc_flag & FC_RSCN_DISCOVERY)
4581  return did;
4582 
4583  spin_lock_irq(shost->host_lock);
4584  if (vport->fc_rscn_flush) {
4585  /* Another thread is walking fc_rscn_id_list on this vport */
4586  spin_unlock_irq(shost->host_lock);
4587  return 0;
4588  }
4589  /* Indicate we are walking fc_rscn_id_list on this vport */
4590  vport->fc_rscn_flush = 1;
4591  spin_unlock_irq(shost->host_lock);
4592  for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4593  lp = vport->fc_rscn_id_list[i]->virt;
4594  payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4595  payload_len -= sizeof(uint32_t); /* take off word 0 */
4596  while (payload_len) {
4597  rscn_did.un.word = be32_to_cpu(*lp++);
4598  payload_len -= sizeof(uint32_t);
4599  switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4600  case RSCN_ADDRESS_FORMAT_PORT:
4601  if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4602  && (ns_did.un.b.area == rscn_did.un.b.area)
4603  && (ns_did.un.b.id == rscn_did.un.b.id))
4604  goto return_did_out;
4605  break;
4606  case RSCN_ADDRESS_FORMAT_AREA:
4607  if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4608  && (ns_did.un.b.area == rscn_did.un.b.area))
4609  goto return_did_out;
4610  break;
4611  case RSCN_ADDRESS_FORMAT_DOMAIN:
4612  if (ns_did.un.b.domain == rscn_did.un.b.domain)
4613  goto return_did_out;
4614  break;
4615  case RSCN_ADDRESS_FORMAT_FABRIC:
4616  goto return_did_out;
4617  }
4618  }
4619  }
4620  /* Indicate we are done with walking fc_rscn_id_list on this vport */
4621  vport->fc_rscn_flush = 0;
4622  return 0;
4623 return_did_out:
4624  /* Indicate we are done with walking fc_rscn_id_list on this vport */
4625  vport->fc_rscn_flush = 0;
4626  return did;
4627 }
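/*
 * Worked example of the matching above: an RSCN page in "area" address
 * format for affected address 0x0A0B00 matches any local node whose D_ID
 * shares domain 0x0A and area 0x0B (0x0A0B1E, for instance), a "domain"
 * format page only needs the 0x0A domain byte to match, and a "fabric"
 * format page matches every non-fabric node on the vport.
 */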
4628 
4640 static int
4641 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
4642 {
4643  struct lpfc_nodelist *ndlp = NULL;
4644 
4645  /* Move all affected nodes by pending RSCNs to NPR state. */
4646  list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4647  if (!NLP_CHK_NODE_ACT(ndlp) ||
4648  (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4649  !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
4650  continue;
4651  lpfc_disc_state_machine(vport, ndlp, NULL,
4652     NLP_EVT_DEVICE_RECOVERY);
4653  lpfc_cancel_retry_delay_tmo(vport, ndlp);
4654  }
4655  return 0;
4656 }
4657 
4666 static void
4667 lpfc_send_rscn_event(struct lpfc_vport *vport,
4668  struct lpfc_iocbq *cmdiocb)
4669 {
4670  struct lpfc_dmabuf *pcmd;
4671  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4672  uint32_t *payload_ptr;
4673  uint32_t payload_len;
4674  struct lpfc_rscn_event_header *rscn_event_data;
4675 
4676  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4677  payload_ptr = (uint32_t *) pcmd->virt;
4678  payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4679 
4680  rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4681  payload_len, GFP_KERNEL);
4682  if (!rscn_event_data) {
4683  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4684  "0147 Failed to allocate memory for RSCN event\n");
4685  return;
4686  }
4687  rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4688  rscn_event_data->payload_length = payload_len;
4689  memcpy(rscn_event_data->rscn_payload, payload_ptr,
4690  payload_len);
4691 
4692  fc_host_post_vendor_event(shost,
4693   fc_get_event_number(),
4694   sizeof(struct lpfc_els_event_header) + payload_len,
4695   (char *)rscn_event_data,
4696   LPFC_NL_VENDOR_ID);
4697 
4698  kfree(rscn_event_data);
4699 }
4700 
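/**
 * lpfc_els_rcv_rscn - Process an unsolicited RSCN iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * Summary of the routine below: it ACCs the RSCN, posts an RSCN event to
 * management applications, and either defers the payload (when discovery or
 * another RSCN is already in progress) or saves it on fc_rscn_id_list and
 * kicks off lpfc_els_handle_rscn().
 **/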
4723 static int
4724 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4725  struct lpfc_nodelist *ndlp)
4726 {
4727  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4728  struct lpfc_hba *phba = vport->phba;
4729  struct lpfc_dmabuf *pcmd;
4730  uint32_t *lp, *datap;
4731  IOCB_t *icmd;
4732  uint32_t payload_len, length, nportid, *cmd;
4733  int rscn_cnt;
4734  int rscn_id = 0, hba_id = 0;
4735  int i;
4736 
4737  icmd = &cmdiocb->iocb;
4738  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4739  lp = (uint32_t *) pcmd->virt;
4740 
4741  payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4742  payload_len -= sizeof(uint32_t); /* take off word 0 */
4743  /* RSCN received */
4745  "0214 RSCN received Data: x%x x%x x%x x%x\n",
4746  vport->fc_flag, payload_len, *lp,
4747  vport->fc_rscn_id_cnt);
4748 
4749  /* Send an RSCN event to the management application */
4750  lpfc_send_rscn_event(vport, cmdiocb);
4751 
4752  for (i = 0; i < payload_len/sizeof(uint32_t); i++)
4753   fc_host_post_event(shost, fc_get_event_number(),
4754    FCH_EVT_RSCN, lp[i]);
4755 
4756  /* If we are about to begin discovery, just ACC the RSCN.
4757  * Discovery processing will satisfy it.
4758  */
4759  if (vport->port_state <= LPFC_NS_QRY) {
4761  "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4762  ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4763 
4764  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4765  return 0;
4766  }
4767 
4768  /* If this RSCN just contains NPortIDs for other vports on this HBA,
4769  * just ACC and ignore it.
4770  */
4771  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4772  !(vport->cfg_peer_port_login)) {
4773  i = payload_len;
4774  datap = lp;
4775  while (i > 0) {
4776  nportid = *datap++;
4777  nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4778  i -= sizeof(uint32_t);
4779  rscn_id++;
4780  if (lpfc_find_vport_by_did(phba, nportid))
4781  hba_id++;
4782  }
4783  if (rscn_id == hba_id) {
4784  /* ALL NPortIDs in RSCN are on HBA */
4786  "0219 Ignore RSCN "
4787  "Data: x%x x%x x%x x%x\n",
4788  vport->fc_flag, payload_len,
4789  *lp, vport->fc_rscn_id_cnt);
4791  "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
4792  ndlp->nlp_DID, vport->port_state,
4793  ndlp->nlp_flag);
4794 
4795  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
4796  ndlp, NULL);
4797  return 0;
4798  }
4799  }
4800 
4801  spin_lock_irq(shost->host_lock);
4802  if (vport->fc_rscn_flush) {
4803  /* Another thread is walking fc_rscn_id_list on this vport */
4804  vport->fc_flag |= FC_RSCN_DISCOVERY;
4805  spin_unlock_irq(shost->host_lock);
4806  /* Send back ACC */
4807  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4808  return 0;
4809  }
4810  /* Indicate we are walking fc_rscn_id_list on this vport */
4811  vport->fc_rscn_flush = 1;
4812  spin_unlock_irq(shost->host_lock);
4813  /* Get the array count after successfully have the token */
4814  rscn_cnt = vport->fc_rscn_id_cnt;
4815  /* If we are already processing an RSCN, save the received
4816  * RSCN payload buffer, cmdiocb->context2 to process later.
4817  */
4818  if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
4820  "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
4821  ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4822 
4823  spin_lock_irq(shost->host_lock);
4824  vport->fc_flag |= FC_RSCN_DEFERRED;
4825  if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4826  !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4827  vport->fc_flag |= FC_RSCN_MODE;
4828  spin_unlock_irq(shost->host_lock);
4829  if (rscn_cnt) {
4830  cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4831  length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4832  }
4833  if ((rscn_cnt) &&
4834  (payload_len + length <= LPFC_BPL_SIZE)) {
4835  *cmd &= ELS_CMD_MASK;
4836  *cmd |= cpu_to_be32(payload_len + length);
4837  memcpy(((uint8_t *)cmd) + length, lp,
4838  payload_len);
4839  } else {
4840  vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4841  vport->fc_rscn_id_cnt++;
4842  /* If we zero, cmdiocb->context2, the calling
4843  * routine will not try to free it.
4844  */
4845  cmdiocb->context2 = NULL;
4846  }
4847  /* Deferred RSCN */
4849  "0235 Deferred RSCN "
4850  "Data: x%x x%x x%x\n",
4851  vport->fc_rscn_id_cnt, vport->fc_flag,
4852  vport->port_state);
4853  } else {
4854  vport->fc_flag |= FC_RSCN_DISCOVERY;
4855  spin_unlock_irq(shost->host_lock);
4856  /* ReDiscovery RSCN */
4858  "0234 ReDiscovery RSCN "
4859  "Data: x%x x%x x%x\n",
4860  vport->fc_rscn_id_cnt, vport->fc_flag,
4861  vport->port_state);
4862  }
4863  /* Indicate we are done walking fc_rscn_id_list on this vport */
4864  vport->fc_rscn_flush = 0;
4865  /* Send back ACC */
4866  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4867  /* send RECOVERY event for ALL nodes that match RSCN payload */
4868  lpfc_rscn_recovery_check(vport);
4869  spin_lock_irq(shost->host_lock);
4870  vport->fc_flag &= ~FC_RSCN_DEFERRED;
4871  spin_unlock_irq(shost->host_lock);
4872  return 0;
4873  }
4875  "RCV RSCN: did:x%x/ste:x%x flg:x%x",
4876  ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4877 
4878  spin_lock_irq(shost->host_lock);
4879  vport->fc_flag |= FC_RSCN_MODE;
4880  spin_unlock_irq(shost->host_lock);
4881  vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
4882  /* Indicate we are done walking fc_rscn_id_list on this vport */
4883  vport->fc_rscn_flush = 0;
4884  /*
4885  * If we zero, cmdiocb->context2, the calling routine will
4886  * not try to free it.
4887  */
4888  cmdiocb->context2 = NULL;
4889  lpfc_set_disctmo(vport);
4890  /* Send back ACC */
4891  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4892  /* send RECOVERY event for ALL nodes that match RSCN payload */
4893  lpfc_rscn_recovery_check(vport);
4894  return lpfc_els_handle_rscn(vport);
4895 }
4896 
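/**
 * lpfc_els_handle_rscn - Handle rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Summary of the routine below: it starts the discovery timer and issues a
 * NameServer GID_FT query (or a NameServer PLOGI first, if no usable
 * NameServer node exists) so the RSCN-affected nodes can be rediscovered.
 *
 * Return: 1 when waiting on a NameServer completion, 0 when the RSCN
 * processing has been flushed.
 **/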
4913 int
4914 lpfc_els_handle_rscn(struct lpfc_vport *vport)
4915 {
4916  struct lpfc_nodelist *ndlp;
4917  struct lpfc_hba *phba = vport->phba;
4918 
4919  /* Ignore RSCN if the port is being torn down. */
4920  if (vport->load_flag & FC_UNLOADING) {
4921  lpfc_els_flush_rscn(vport);
4922  return 0;
4923  }
4924 
4925  /* Start timer for RSCN processing */
4926  lpfc_set_disctmo(vport);
4927 
4928  /* RSCN processed */
4930  "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4931  vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4932  vport->port_state);
4933 
4934  /* To process RSCN, first compare RSCN data with NameServer */
4935  vport->fc_ns_retry = 0;
4936  vport->num_disc_nodes = 0;
4937 
4938  ndlp = lpfc_findnode_did(vport, NameServer_DID);
4939  if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4940  && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
4941  /* Good ndlp, issue CT Request to NameServer */
4942  if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
4943  /* Wait for NameServer query cmpl before we can
4944  continue */
4945  return 1;
4946  } else {
4947  /* If login to NameServer does not exist, issue one */
4948  /* Good status, issue PLOGI to NameServer */
4949  ndlp = lpfc_findnode_did(vport, NameServer_DID);
4950  if (ndlp && NLP_CHK_NODE_ACT(ndlp))
4951  /* Wait for NameServer login cmpl before we can
4952  continue */
4953  return 1;
4954 
4955  if (ndlp) {
4956    ndlp = lpfc_enable_node(vport, ndlp,
4957       NLP_STE_PLOGI_ISSUE);
4958  if (!ndlp) {
4959  lpfc_els_flush_rscn(vport);
4960  return 0;
4961    }
4962    ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
4963  } else {
4964  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4965  if (!ndlp) {
4966  lpfc_els_flush_rscn(vport);
4967  return 0;
4968  }
4969  lpfc_nlp_init(vport, ndlp, NameServer_DID);
4970    ndlp->nlp_prev_state = ndlp->nlp_state;
4971    lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4972  }
4973   ndlp->nlp_type |= NLP_FABRIC;
4974   lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4975  /* Wait for NameServer login cmpl before we can
4976  * continue
4977  */
4978  return 1;
4979  }
4980 
4981  lpfc_els_flush_rscn(vport);
4982  return 0;
4983 }
4984 
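/**
 * lpfc_els_rcv_flogi - Process an unsolicited FLOGI iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * Summary of the routine below: it validates the service parameters,
 * handles the point-to-point WWPN comparison (re-initializing the link or
 * aborting our own FLOGI on a tie), and returns either an ACC or an LS_RJT.
 **/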
5010 static int
5011 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5012  struct lpfc_nodelist *ndlp)
5013 {
5014  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5015  struct lpfc_hba *phba = vport->phba;
5016  struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5017  uint32_t *lp = (uint32_t *) pcmd->virt;
5018  IOCB_t *icmd = &cmdiocb->iocb;
5019  struct serv_parm *sp;
5020  LPFC_MBOXQ_t *mbox;
5021  struct ls_rjt stat;
5022  uint32_t cmd, did;
5023  int rc;
5024 
5025  cmd = *lp++;
5026  sp = (struct serv_parm *) lp;
5027 
5028  /* FLOGI received */
5029 
5030  lpfc_set_disctmo(vport);
5031 
5032  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
5033  /* We should never receive a FLOGI in loop mode, ignore it */
5034  did = icmd->un.elsreq64.remoteID;
5035 
5036  /* An FLOGI ELS command <elsCmd> was received from DID <did> in
5037  Loop Mode */
5038  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5039  "0113 An FLOGI ELS command x%x was "
5040  "received from DID x%x in Loop Mode\n",
5041  cmd, did);
5042  return 1;
5043  }
5044 
5045  if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
5046  /* For a FLOGI we accept, then if our portname is greater
5047  * then the remote portname we initiate Nport login.
5048  */
5049 
5050  rc = memcmp(&vport->fc_portname, &sp->portName,
5051  sizeof(struct lpfc_name));
5052 
5053  if (!rc) {
5054  if (phba->sli_rev < LPFC_SLI_REV4) {
5055  mbox = mempool_alloc(phba->mbox_mem_pool,
5056  GFP_KERNEL);
5057  if (!mbox)
5058  return 1;
5059  lpfc_linkdown(phba);
5060  lpfc_init_link(phba, mbox,
5061  phba->cfg_topology,
5062  phba->cfg_link_speed);
5063    mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5064    mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5065  mbox->vport = vport;
5066  rc = lpfc_sli_issue_mbox(phba, mbox,
5067  MBX_NOWAIT);
5068  lpfc_set_loopback_flag(phba);
5069  if (rc == MBX_NOT_FINISHED)
5070  mempool_free(mbox, phba->mbox_mem_pool);
5071  return 1;
5072  } else {
5073  /* abort the flogi coming back to ourselves
5074  * due to external loopback on the port.
5075  */
5076  lpfc_els_abort_flogi(phba);
5077  return 0;
5078  }
5079  } else if (rc > 0) { /* greater than */
5080  spin_lock_irq(shost->host_lock);
5081  vport->fc_flag |= FC_PT2PT_PLOGI;
5082  spin_unlock_irq(shost->host_lock);
5083 
5084  /* If we have the high WWPN we can assign our own
5085  * myDID; otherwise, we have to WAIT for a PLOGI
5086  * from the remote NPort to find out what it
5087  * will be.
5088  */
5089  vport->fc_myDID = PT2PT_LocalID;
5090  }
5091 
5092  /*
5093  * The vport state should go to LPFC_FLOGI only
5094  * AFTER we issue a FLOGI, not receive one.
5095  */
5096  spin_lock_irq(shost->host_lock);
5097  vport->fc_flag |= FC_PT2PT;
5098  vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
5099  spin_unlock_irq(shost->host_lock);
5100 
5101  /*
5102  * We temporarily set fc_myDID to make it look like we are
5103  * a Fabric. This is done just so we end up with the right
5104  * did / sid on the FLOGI ACC rsp.
5105  */
5106  did = vport->fc_myDID;
5107  vport->fc_myDID = Fabric_DID;
5108 
5109  } else {
5110  /* Reject this request because invalid parameters */
5111  stat.un.b.lsRjtRsvd0 = 0;
5112  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5113  stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
5114  stat.un.b.vendorUnique = 0;
5115 
5116  /*
5117  * We temporarily set fc_myDID to make it look like we are
5118  * a Fabric. This is done just so we end up with the right
5119  * did / sid on the FLOGI LS_RJT rsp.
5120  */
5121  did = vport->fc_myDID;
5122  vport->fc_myDID = Fabric_DID;
5123 
5124  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5125  NULL);
5126 
5127  /* Now let's put fc_myDID back to what it's supposed to be */
5128  vport->fc_myDID = did;
5129 
5130  return 1;
5131  }
5132 
5133  /* Send back ACC */
5134  lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
5135 
5136  /* Now let's put fc_myDID back to what it's supposed to be */
5137  vport->fc_myDID = did;
5138 
5139  if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
5140 
5141  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5142  if (!mbox)
5143  goto fail;
5144 
5145  lpfc_config_link(phba, mbox);
5146 
5147   mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5148   mbox->vport = vport;
5149  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5150  if (rc == MBX_NOT_FINISHED) {
5151  mempool_free(mbox, phba->mbox_mem_pool);
5152  goto fail;
5153  }
5154  }
5155 
5156  return 0;
5157 fail:
5158  return 1;
5159 }
5160 
5177 static int
5178 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5179  struct lpfc_nodelist *ndlp)
5180 {
5181  struct lpfc_dmabuf *pcmd;
5182  uint32_t *lp;
5183  IOCB_t *icmd;
5184  RNID *rn;
5185  struct ls_rjt stat;
5186  uint32_t cmd, did;
5187 
5188  icmd = &cmdiocb->iocb;
5189  did = icmd->un.elsreq64.remoteID;
5190  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5191  lp = (uint32_t *) pcmd->virt;
5192 
5193  cmd = *lp++;
5194  rn = (RNID *) lp;
5195 
5196  /* RNID received */
5197 
5198  switch (rn->Format) {
5199  case 0:
5200  case RNID_TOPOLOGY_DISC:
5201  /* Send back ACC */
5202  lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
5203  break;
5204  default:
5205  /* Reject this request because format not supported */
5206  stat.un.b.lsRjtRsvd0 = 0;
5207  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5208  stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5209  stat.un.b.vendorUnique = 0;
5210  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5211  NULL);
5212  }
5213  return 0;
5214 }
5215 
5225 static int
5226 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5227  struct lpfc_nodelist *ndlp)
5228 {
5229  uint8_t *pcmd;
5230 
5231  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
5232 
5233  /* skip over first word of echo command to find echo data */
5234  pcmd += sizeof(uint32_t);
5235 
5236  lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
5237  return 0;
5238 }
5239 
5253 static int
5254 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5255  struct lpfc_nodelist *ndlp)
5256 {
5257  struct ls_rjt stat;
5258 
5259  /* For now, unconditionally reject this command */
5260  stat.un.b.lsRjtRsvd0 = 0;
5261  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5262  stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5263  stat.un.b.vendorUnique = 0;
5264  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5265  return 0;
5266 }
5267 
5284 static void
5285 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5286  struct lpfc_nodelist *ndlp)
5287 {
5288  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5289  if (vport->phba->sli_rev == LPFC_SLI_REV4)
5290  lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5291 }
5292 
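/**
 * lpfc_els_rsp_rls_acc - Completion callback for the RLS READ_LNK_STAT mbox
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the completed mailbox command.
 *
 * Summary of the routine below: it builds an RLS ACC payload from the link
 * statistics returned by the MBX_READ_LNK_STAT mailbox command and transmits
 * it on the ELS ring, reusing the saved ox_id/rx_id of the original request.
 **/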
5312 static void
5313 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5314 {
5315  MAILBOX_t *mb;
5316  IOCB_t *icmd;
5317  struct RLS_RSP *rls_rsp;
5318  uint8_t *pcmd;
5319  struct lpfc_iocbq *elsiocb;
5320  struct lpfc_nodelist *ndlp;
5321  uint16_t oxid;
5322  uint16_t rxid;
5323  uint32_t cmdsize;
5324 
5325  mb = &pmb->u.mb;
5326 
5327  ndlp = (struct lpfc_nodelist *) pmb->context2;
5328  rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5329  oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5330  pmb->context1 = NULL;
5331  pmb->context2 = NULL;
5332 
5333  if (mb->mbxStatus) {
5334  mempool_free(pmb, phba->mbox_mem_pool);
5335  return;
5336  }
5337 
5338  cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
5339  elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5340  lpfc_max_els_tries, ndlp,
5341  ndlp->nlp_DID, ELS_CMD_ACC);
5342 
5343  /* Decrement the ndlp reference count from previous mbox command */
5344  lpfc_nlp_put(ndlp);
5345 
5346  if (!elsiocb) {
5347  mempool_free(pmb, phba->mbox_mem_pool);
5348  return;
5349  }
5350 
5351  icmd = &elsiocb->iocb;
5352  icmd->ulpContext = rxid;
5353  icmd->unsli3.rcvsli3.ox_id = oxid;
5354 
5355  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5356  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5357  pcmd += sizeof(uint32_t); /* Skip past command */
5358  rls_rsp = (struct RLS_RSP *)pcmd;
5359 
5360  rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5361  rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5362  rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5363  rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5364  rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5365  rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5366  mempool_free(pmb, phba->mbox_mem_pool);
5367  /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5368  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5369  "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
5370  "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5371  elsiocb->iotag, elsiocb->iocb.ulpContext,
5372  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5373  ndlp->nlp_rpi);
5374  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5375  phba->fc_stat.elsXmitACC++;
5376  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5377  lpfc_els_free_iocb(phba, elsiocb);
5378 }
5379 
5399 static void
5400 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5401 {
5402  MAILBOX_t *mb;
5403  IOCB_t *icmd;
5404  RPS_RSP *rps_rsp;
5405  uint8_t *pcmd;
5406  struct lpfc_iocbq *elsiocb;
5407  struct lpfc_nodelist *ndlp;
5408  uint16_t status;
5409  uint16_t oxid;
5410  uint16_t rxid;
5411  uint32_t cmdsize;
5412 
5413  mb = &pmb->u.mb;
5414 
5415  ndlp = (struct lpfc_nodelist *) pmb->context2;
5416  rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5417  oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5418  pmb->context1 = NULL;
5419  pmb->context2 = NULL;
5420 
5421  if (mb->mbxStatus) {
5422  mempool_free(pmb, phba->mbox_mem_pool);
5423  return;
5424  }
5425 
5426  cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
5427  mempool_free(pmb, phba->mbox_mem_pool);
5428  elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5429  lpfc_max_els_tries, ndlp,
5430  ndlp->nlp_DID, ELS_CMD_ACC);
5431 
5432  /* Decrement the ndlp reference count from previous mbox command */
5433  lpfc_nlp_put(ndlp);
5434 
5435  if (!elsiocb)
5436  return;
5437 
5438  icmd = &elsiocb->iocb;
5439  icmd->ulpContext = rxid;
5440  icmd->unsli3.rcvsli3.ox_id = oxid;
5441 
5442  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5443  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5444  pcmd += sizeof(uint32_t); /* Skip past command */
5445  rps_rsp = (RPS_RSP *)pcmd;
5446 
5447  if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5448  status = 0x10;
5449  else
5450  status = 0x8;
5451  if (phba->pport->fc_flag & FC_FABRIC)
5452  status |= 0x4;
5453 
5454  rps_rsp->rsvd1 = 0;
5455  rps_rsp->portStatus = cpu_to_be16(status);
5456  rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5457  rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5458  rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5459  rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5460  rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5461  rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5462  /* Xmit ELS RPS ACC response tag <ulpIoTag> */
5463  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5464  "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
5465  "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5466  elsiocb->iotag, elsiocb->iocb.ulpContext,
5467  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5468  ndlp->nlp_rpi);
5469  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5470  phba->fc_stat.elsXmitACC++;
5471  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5472  lpfc_els_free_iocb(phba, elsiocb);
5473  return;
5474 }
5475 
5494 static int
5495 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5496  struct lpfc_nodelist *ndlp)
5497 {
5498  struct lpfc_hba *phba = vport->phba;
5499  LPFC_MBOXQ_t *mbox;
5500  struct lpfc_dmabuf *pcmd;
5501  struct ls_rjt stat;
5502 
5503  if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5504  (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5505  /* reject the unsolicited RPS request and done with it */
5506  goto reject_out;
5507 
5508  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5509 
5510  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5511  if (mbox) {
5512  lpfc_read_lnk_stat(phba, mbox);
5513  mbox->context1 = (void *)((unsigned long)
5514  ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5515  cmdiocb->iocb.ulpContext)); /* rx_id */
5516  mbox->context2 = lpfc_nlp_get(ndlp);
5517  mbox->vport = vport;
5518  mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5519  if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5520  != MBX_NOT_FINISHED)
5521  /* Mbox completion will send ELS Response */
5522  return 0;
5523  /* Decrement reference count used for the failed mbox
5524  * command.
5525  */
5526  lpfc_nlp_put(ndlp);
5527  mempool_free(mbox, phba->mbox_mem_pool);
5528  }
5529 reject_out:
5530  /* issue rejection response */
5531  stat.un.b.lsRjtRsvd0 = 0;
5532  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5533  stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5534  stat.un.b.vendorUnique = 0;
5535  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5536  return 0;
5537 }
5538 
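/**
 * lpfc_els_rcv_rtv - Process an unsolicited RTV (Read Timeout Value) iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * Summary of the routine below: it rejects the request unless the node is in
 * the mapped or unmapped state; otherwise it returns an ACC carrying R_A_TOV
 * (reported in milliseconds), E_D_TOV and the qtov word.
 **/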
5560 static int
5561 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5562  struct lpfc_nodelist *ndlp)
5563 {
5564  struct lpfc_hba *phba = vport->phba;
5565  struct ls_rjt stat;
5566  struct RTV_RSP *rtv_rsp;
5567  uint8_t *pcmd;
5568  struct lpfc_iocbq *elsiocb;
5569  uint32_t cmdsize;
5570 
5571 
5572  if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5573  (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5574  /* reject the unsolicited RPS request and done with it */
5575  goto reject_out;
5576 
5577  cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5578  elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5579  lpfc_max_els_tries, ndlp,
5580  ndlp->nlp_DID, ELS_CMD_ACC);
5581 
5582  if (!elsiocb)
5583  return 1;
5584 
5585  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5586  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5587  pcmd += sizeof(uint32_t); /* Skip past command */
5588 
5589  /* use the command's xri in the response */
5590  elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
5591  elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5592 
5593  rtv_rsp = (struct RTV_RSP *)pcmd;
5594 
5595  /* populate RTV payload */
5596  rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5597  rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5598  bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5599  bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
5600  rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5601 
5602  /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5603  lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5604  "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5605  "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5606  "Data: x%x x%x x%x\n",
5607  elsiocb->iotag, elsiocb->iocb.ulpContext,
5608  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5609  ndlp->nlp_rpi,
5610  rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5611  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5612  phba->fc_stat.elsXmitACC++;
5613  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5614  lpfc_els_free_iocb(phba, elsiocb);
5615  return 0;
5616 
5617 reject_out:
5618  /* issue rejection response */
5619  stat.un.b.lsRjtRsvd0 = 0;
5620  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5621  stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5622  stat.un.b.vendorUnique = 0;
5623  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5624  return 0;
5625 }
5626 
5627 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
5628  * @vport: pointer to a host virtual N_Port data structure.
5629  * @cmdiocb: pointer to lpfc command iocb data structure.
5630  * @ndlp: pointer to a node-list data structure.
5631  *
5632  * This routine processes Read Port Status (RPS) IOCB received as an
5633  * ELS unsolicited event. It first checks the remote port state. If the
5634  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5635  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5636  * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
5637  * for reading the HBA link statistics. It is for the callback function,
5638  * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
5639  * to actually sending out RPS Accept (ACC) response.
5640  *
5641  * Return codes
5642  * 0 - Successfully processed rps iocb (currently always return 0)
5643  **/
5644 static int
5645 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5646  struct lpfc_nodelist *ndlp)
5647 {
5648  struct lpfc_hba *phba = vport->phba;
5649  uint32_t *lp;
5650  uint8_t flag;
5651  LPFC_MBOXQ_t *mbox;
5652  struct lpfc_dmabuf *pcmd;
5653  RPS *rps;
5654  struct ls_rjt stat;
5655 
5656  if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5657  (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5658  /* reject the unsolicited RPS request and done with it */
5659  goto reject_out;
5660 
5661  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5662  lp = (uint32_t *) pcmd->virt;
5663  flag = (be32_to_cpu(*lp++) & 0xf);
5664  rps = (RPS *) lp;
5665 
5666  if ((flag == 0) ||
5667  ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
5668  ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
5669  sizeof(struct lpfc_name)) == 0))) {
5670 
5671  printk("Fix me....\n");
5672  dump_stack();
5673  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5674  if (mbox) {
5675  lpfc_read_lnk_stat(phba, mbox);
5676  mbox->context1 = (void *)((unsigned long)
5677  ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5678  cmdiocb->iocb.ulpContext)); /* rx_id */
5679  mbox->context2 = lpfc_nlp_get(ndlp);
5680  mbox->vport = vport;
5681  mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
5682  if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5683  != MBX_NOT_FINISHED)
5684  /* Mbox completion will send ELS Response */
5685  return 0;
5686  /* Decrement reference count used for the failed mbox
5687  * command.
5688  */
5689  lpfc_nlp_put(ndlp);
5690  mempool_free(mbox, phba->mbox_mem_pool);
5691  }
5692  }
5693 
5694 reject_out:
5695  /* issue rejection response */
5696  stat.un.b.lsRjtRsvd0 = 0;
5697  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5698  stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5699  stat.un.b.vendorUnique = 0;
5700  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5701  return 0;
5702 }
5703 
5704 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) iocb
5705  * @vport: pointer to a host virtual N_Port data structure.
5706  * @ndlp: pointer to a node-list data structure.
5707  * @did: DID of the target.
5708  * @rrq: Pointer to the rrq struct.
5709  *
5710  * Build an ELS RRQ command and send it to the target. If the iocb is
5711  * issued successfully, the completion handler will clear the RRQ.
5712  *
5713  * Return codes
5714  * 0 - Successfully sent rrq els iocb.
5715  * 1 - Failed to send rrq els iocb.
5716  **/
5717 static int
5718 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5719  uint32_t did, struct lpfc_node_rrq *rrq)
5720 {
5721  struct lpfc_hba *phba = vport->phba;
5722  struct RRQ *els_rrq;
5723  IOCB_t *icmd;
5724  struct lpfc_iocbq *elsiocb;
5725  uint8_t *pcmd;
5726  uint16_t cmdsize;
5727  int ret;
5728 
5729 
5730  if (ndlp != rrq->ndlp)
5731  ndlp = rrq->ndlp;
5732  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5733  return 1;
5734 
5735  /* If ndlp is not NULL, we will bump the reference count on it */
5736  cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
5737  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
5738  ELS_CMD_RRQ);
5739  if (!elsiocb)
5740  return 1;
5741 
5742  icmd = &elsiocb->iocb;
5743  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5744 
5745  /* For RRQ request, remainder of payload is Exchange IDs */
5746  *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
5747  pcmd += sizeof(uint32_t);
5748  els_rrq = (struct RRQ *) pcmd;
5749 
5750  bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
5751  bf_set(rrq_rxid, els_rrq, rrq->rxid);
5752  bf_set(rrq_did, els_rrq, vport->fc_myDID);
5753  els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
5754  els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
5755 
5756 
5758  "Issue RRQ: did:x%x",
5759  did, rrq->xritag, rrq->rxid);
5760  elsiocb->context_un.rrq = rrq;
5761  elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
5762  ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5763 
5764  if (ret == IOCB_ERROR) {
5765  lpfc_els_free_iocb(phba, elsiocb);
5766  return 1;
5767  }
5768  return 0;
5769 }
5770 
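/**
 * lpfc_send_rrq - Send an RRQ for a completed exchange if it is still active
 * @phba: pointer to lpfc hba data structure.
 * @rrq: pointer to the active rrq entry.
 *
 * Summary of the routine below: it looks up the node for the rrq and issues
 * the ELS RRQ only when the exchange is still marked active; otherwise it
 * returns 1.
 **/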
5783 int
5784 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
5785 {
5786  struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
5787  rrq->nlp_DID);
5788  if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
5789  return lpfc_issue_els_rrq(rrq->vport, ndlp,
5790  rrq->nlp_DID, rrq);
5791  else
5792  return 1;
5793 }
5794 
5814 static int
5815 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5816  struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5817 {
5818  struct lpfc_hba *phba = vport->phba;
5819  IOCB_t *icmd, *oldcmd;
5820  RPL_RSP rpl_rsp;
5821  struct lpfc_iocbq *elsiocb;
5822  uint8_t *pcmd;
5823 
5824  elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5825  ndlp->nlp_DID, ELS_CMD_ACC);
5826 
5827  if (!elsiocb)
5828  return 1;
5829 
5830  icmd = &elsiocb->iocb;
5831  oldcmd = &oldiocb->iocb;
5832  icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5833  icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5834 
5835  pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5836  *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5837  pcmd += sizeof(uint16_t);
5838  *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
5839  pcmd += sizeof(uint16_t);
5840 
5841  /* Setup the RPL ACC payload */
5842  rpl_rsp.listLen = be32_to_cpu(1);
5843  rpl_rsp.index = 0;
5844  rpl_rsp.port_num_blk.portNum = 0;
5845  rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
5846  memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
5847  sizeof(struct lpfc_name));
5848  memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
5849  /* Xmit ELS RPL ACC response tag <ulpIoTag> */
5850  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5851  "0120 Xmit ELS RPL ACC response tag x%x "
5852  "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5853  "rpi x%x\n",
5854  elsiocb->iotag, elsiocb->iocb.ulpContext,
5855  ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5856  ndlp->nlp_rpi);
5857  elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5858  phba->fc_stat.elsXmitACC++;
5859  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
5860  IOCB_ERROR) {
5861  lpfc_els_free_iocb(phba, elsiocb);
5862  return 1;
5863  }
5864  return 0;
5865 }
5866 
5883 static int
5884 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5885  struct lpfc_nodelist *ndlp)
5886 {
5887  struct lpfc_dmabuf *pcmd;
5888  uint32_t *lp;
5889  uint32_t maxsize;
5890  uint16_t cmdsize;
5891  RPL *rpl;
5892  struct ls_rjt stat;
5893 
5894  if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5895  (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
5896  /* issue rejection response */
5897  stat.un.b.lsRjtRsvd0 = 0;
5898  stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5899  stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5900  stat.un.b.vendorUnique = 0;
5901  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5902  NULL);
5903  /* rejected the unsolicited RPL request and done with it */
5904  return 0;
5905  }
5906 
5907  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5908  lp = (uint32_t *) pcmd->virt;
5909  rpl = (RPL *) (lp + 1);
5910  maxsize = be32_to_cpu(rpl->maxsize);
5911 
5912  /* We support only one port */
5913  if ((rpl->index == 0) &&
5914  ((maxsize == 0) ||
5915  ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
5916  cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
5917  } else {
5918  cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
5919  }
5920  lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
5921 
5922  return 0;
5923 }
5924 
5949 static int
5950 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5951  struct lpfc_nodelist *ndlp)
5952 {
5953  struct lpfc_dmabuf *pcmd;
5954  uint32_t *lp;
5955  IOCB_t *icmd;
5956  FARP *fp;
5957  uint32_t cmd, cnt, did;
5958 
5959  icmd = &cmdiocb->iocb;
5960  did = icmd->un.elsreq64.remoteID;
5961  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5962  lp = (uint32_t *) pcmd->virt;
5963 
5964  cmd = *lp++;
5965  fp = (FARP *) lp;
5966  /* FARP-REQ received from DID <did> */
5967  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5968  "0601 FARP-REQ received from DID x%x\n", did);
5969  /* We will only support match on WWPN or WWNN */
5970  if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
5971  return 0;
5972  }
5973 
5974  cnt = 0;
5975  /* If this FARP command is searching for my portname */
5976  if (fp->Mflags & FARP_MATCH_PORT) {
5977  if (memcmp(&fp->RportName, &vport->fc_portname,
5978  sizeof(struct lpfc_name)) == 0)
5979  cnt = 1;
5980  }
5981 
5982  /* If this FARP command is searching for my nodename */
5983  if (fp->Mflags & FARP_MATCH_NODE) {
5984  if (memcmp(&fp->RnodeName, &vport->fc_nodename,
5985  sizeof(struct lpfc_name)) == 0)
5986  cnt = 1;
5987  }
5988 
5989  if (cnt) {
5990  if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
5991  (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
5992  /* Log back into the node before sending the FARP. */
5993  if (fp->Rflags & FARP_REQUEST_PLOGI) {
5994  ndlp->nlp_prev_state = ndlp->nlp_state;
5995    lpfc_nlp_set_state(vport, ndlp,
5996        NLP_STE_PLOGI_ISSUE);
5997  lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5998  }
5999 
6000  /* Send a FARP response to that node */
6001  if (fp->Rflags & FARP_REQUEST_FARPR)
6002  lpfc_issue_els_farpr(vport, did, 0);
6003  }
6004  }
6005  return 0;
6006 }
6007 
6022 static int
6023 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6024  struct lpfc_nodelist *ndlp)
6025 {
6026  struct lpfc_dmabuf *pcmd;
6027  uint32_t *lp;
6028  IOCB_t *icmd;
6029  uint32_t cmd, did;
6030 
6031  icmd = &cmdiocb->iocb;
6032  did = icmd->un.elsreq64.remoteID;
6033  pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6034  lp = (uint32_t *) pcmd->virt;
6035 
6036  cmd = *lp++;
6037  /* FARP-RSP received from DID <did> */
6038  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6039  "0600 FARP-RSP received from DID x%x\n", did);
6040  /* ACCEPT the Farp resp request */
6041  lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6042 
6043  return 0;
6044 }
6045 
6065 static int
6066 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6067  struct lpfc_nodelist *fan_ndlp)
6068 {
6069  struct lpfc_hba *phba = vport->phba;
6070  uint32_t *lp;
6071  FAN *fp;
6072 
6073  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
6074  lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
6075  fp = (FAN *) ++lp;
6076  /* FAN received; Fan does not have a reply sequence */
6077  if ((vport == phba->pport) &&
6078  (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
6079  if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
6080  sizeof(struct lpfc_name))) ||
6081  (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
6082  sizeof(struct lpfc_name)))) {
6083  /* This port has switched fabrics. FLOGI is required */
6084  lpfc_issue_init_vfi(vport);
6085  } else {
6086  /* FAN verified - skip FLOGI */
6087  vport->fc_myDID = vport->fc_prevDID;
6088    if (phba->sli_rev < LPFC_SLI_REV4)
6089     lpfc_issue_fabric_reglogin(vport);
6090  else {
6091  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6092  "3138 Need register VFI: (x%x/%x)\n",
6093  vport->fc_prevDID, vport->fc_myDID);
6094  lpfc_issue_reg_vfi(vport);
6095  }
6096  }
6097  }
6098  return 0;
6099 }
6100 
6111 void
6112 lpfc_els_timeout(unsigned long ptr)
6113 {
6114  struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
6115  struct lpfc_hba *phba = vport->phba;
6116  uint32_t tmo_posted;
6117  unsigned long iflag;
6118 
6119  spin_lock_irqsave(&vport->work_port_lock, iflag);
6120  tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
6121  if (!tmo_posted)
6122  vport->work_port_events |= WORKER_ELS_TMO;
6123  spin_unlock_irqrestore(&vport->work_port_lock, iflag);
6124 
6125  if (!tmo_posted)
6126  lpfc_worker_wake_up(phba);
6127  return;
6128 }
6129 
6130 
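/**
 * lpfc_els_timeout_handler - Worker-thread handler for ELS timeouts
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Summary of the routine below: it walks the ELS ring txcmplq, decrements
 * the driver timeout on each outstanding ELS iocb for this vport, aborts
 * the iocbs that have expired, and re-arms els_tmofunc while work remains
 * on the txcmplq.
 **/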
6140 void
6141 lpfc_els_timeout_handler(struct lpfc_vport *vport)
6142 {
6143  struct lpfc_hba *phba = vport->phba;
6144  struct lpfc_sli_ring *pring;
6145  struct lpfc_iocbq *tmp_iocb, *piocb;
6146  IOCB_t *cmd = NULL;
6147  struct lpfc_dmabuf *pcmd;
6148  uint32_t els_command = 0;
6149  uint32_t timeout;
6150  uint32_t remote_ID = 0xffffffff;
6151  LIST_HEAD(txcmplq_completions);
6152  LIST_HEAD(abort_list);
6153 
6154 
6155  timeout = (uint32_t)(phba->fc_ratov << 1);
6156 
6157  pring = &phba->sli.ring[LPFC_ELS_RING];
6158 
6159  spin_lock_irq(&phba->hbalock);
6160  list_splice_init(&pring->txcmplq, &txcmplq_completions);
6161  spin_unlock_irq(&phba->hbalock);
6162 
6163  list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
6164  cmd = &piocb->iocb;
6165 
6166  if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
6167  piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6168  piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6169  continue;
6170 
6171  if (piocb->vport != vport)
6172  continue;
6173 
6174  pcmd = (struct lpfc_dmabuf *) piocb->context2;
6175  if (pcmd)
6176  els_command = *(uint32_t *) (pcmd->virt);
6177 
6178  if (els_command == ELS_CMD_FARP ||
6179  els_command == ELS_CMD_FARPR ||
6180  els_command == ELS_CMD_FDISC)
6181  continue;
6182 
6183  if (piocb->drvrTimeout > 0) {
6184  if (piocb->drvrTimeout >= timeout)
6185  piocb->drvrTimeout -= timeout;
6186  else
6187  piocb->drvrTimeout = 0;
6188  continue;
6189  }
6190 
6191  remote_ID = 0xffffffff;
6192  if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
6193  remote_ID = cmd->un.elsreq64.remoteID;
6194  else {
6195  struct lpfc_nodelist *ndlp;
6196  ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
6197  if (ndlp && NLP_CHK_NODE_ACT(ndlp))
6198  remote_ID = ndlp->nlp_DID;
6199  }
6200  list_add_tail(&piocb->dlist, &abort_list);
6201  }
6202  spin_lock_irq(&phba->hbalock);
6203  list_splice(&txcmplq_completions, &pring->txcmplq);
6204  spin_unlock_irq(&phba->hbalock);
6205 
6206  list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
6207  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6208  "0127 ELS timeout Data: x%x x%x x%x "
6209  "x%x\n", els_command,
6210  remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
6211  spin_lock_irq(&phba->hbalock);
6212  list_del_init(&piocb->dlist);
6213  lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6214  spin_unlock_irq(&phba->hbalock);
6215  }
6216 
6217  if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
6218  mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
6219 }
6220 
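/**
 * lpfc_els_flush_cmd - Clean out the outstanding ELS commands of a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Summary of the routine below: it moves this vport's pending ELS iocbs off
 * the txq and cancels them with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, and
 * aborts the vport's iocbs still outstanding on the txcmplq.
 **/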
6241 void
6242 lpfc_els_flush_cmd(struct lpfc_vport *vport)
6243 {
6244  LIST_HEAD(completions);
6245  struct lpfc_hba *phba = vport->phba;
6246  struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6247  struct lpfc_iocbq *tmp_iocb, *piocb;
6248  IOCB_t *cmd = NULL;
6249 
6250  lpfc_fabric_abort_vport(vport);
6251 
6252  spin_lock_irq(&phba->hbalock);
6253  list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6254  cmd = &piocb->iocb;
6255 
6256  if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6257  continue;
6258  }
6259 
6260  /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6261  if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6262   cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6263   cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6264  cmd->ulpCommand == CMD_ABORT_XRI_CN)
6265  continue;
6266 
6267  if (piocb->vport != vport)
6268  continue;
6269 
6270  list_move_tail(&piocb->list, &completions);
6271  pring->txq_cnt--;
6272  }
6273 
6274  list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6275  if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6276  continue;
6277  }
6278 
6279  if (piocb->vport != vport)
6280  continue;
6281 
6282  lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6283  }
6284  spin_unlock_irq(&phba->hbalock);
6285 
6286  /* Cancel all the IOCBs from the completions list */
6287  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6288         IOERR_SLI_ABORTED);
6289 
6290  return;
6291 }
6292 
6310 void
6311 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6312 {
6313  LIST_HEAD(completions);
6314  struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6315  struct lpfc_iocbq *tmp_iocb, *piocb;
6316  IOCB_t *cmd = NULL;
6317 
6318  lpfc_fabric_abort_hba(phba);
6319  spin_lock_irq(&phba->hbalock);
6320  list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6321  cmd = &piocb->iocb;
6322  if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6323  continue;
6324  /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6325  if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6326   cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6327   cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6328  cmd->ulpCommand == CMD_ABORT_XRI_CN)
6329  continue;
6330  list_move_tail(&piocb->list, &completions);
6331  pring->txq_cnt--;
6332  }
6333  list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6334  if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6335  continue;
6336  lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6337  }
6338  spin_unlock_irq(&phba->hbalock);
6339 
6340  /* Cancel all the IOCBs from the completions list */
6341  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6342         IOERR_SLI_ABORTED);
6343 
6344  return;
6345 }
6346 
6356 void
6357 lpfc_send_els_failure_event(struct lpfc_hba *phba,
6358   struct lpfc_iocbq *cmdiocbp,
6359  struct lpfc_iocbq *rspiocbp)
6360 {
6361  struct lpfc_vport *vport = cmdiocbp->vport;
6362  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6363  struct lpfc_lsrjt_event lsrjt_event;
6364  struct lpfc_fabric_event_header fabric_event;
6365  struct ls_rjt stat;
6366  struct lpfc_nodelist *ndlp;
6367  uint32_t *pcmd;
6368 
6369  ndlp = cmdiocbp->context1;
6370  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6371  return;
6372 
6373  if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
6374  lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
6375  lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
6376  memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
6377  sizeof(struct lpfc_name));
6378  memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
6379  sizeof(struct lpfc_name));
6380  pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
6381  cmdiocbp->context2)->virt);
6382  lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
6383  stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
6384  lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
6385  lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
6386   fc_host_post_vendor_event(shost,
6387    fc_get_event_number(),
6388    sizeof(lsrjt_event),
6389    (char *)&lsrjt_event,
6390    LPFC_NL_VENDOR_ID);
6391   return;
6392  }
6393  if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
6394  (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
6395  fabric_event.event_type = FC_REG_FABRIC_EVENT;
6396  if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
6397  fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
6398  else
6399  fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
6400  memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
6401  sizeof(struct lpfc_name));
6402  memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
6403  sizeof(struct lpfc_name));
6404   fc_host_post_vendor_event(shost,
6405    fc_get_event_number(),
6406    sizeof(fabric_event),
6407    (char *)&fabric_event,
6408    LPFC_NL_VENDOR_ID);
6409   return;
6410  }
6411 
6412 }
6413 
6423 static void
6424 lpfc_send_els_event(struct lpfc_vport *vport,
6425  struct lpfc_nodelist *ndlp,
6426  uint32_t *payload)
6427 {
6428  struct lpfc_els_event_header *els_data = NULL;
6429  struct lpfc_logo_event *logo_data = NULL;
6430  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6431 
6432  if (*payload == ELS_CMD_LOGO) {
6433  logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
6434  if (!logo_data) {
6435  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6436  "0148 Failed to allocate memory "
6437  "for LOGO event\n");
6438  return;
6439  }
6440  els_data = &logo_data->header;
6441  } else {
6442  els_data = kmalloc(sizeof(struct lpfc_els_event_header),
6443  GFP_KERNEL);
6444  if (!els_data) {
6445  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6446  "0149 Failed to allocate memory "
6447  "for ELS event\n");
6448  return;
6449  }
6450  }
6451  els_data->event_type = FC_REG_ELS_EVENT;
6452  switch (*payload) {
6453  case ELS_CMD_PLOGI:
6454  els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
6455  break;
6456  case ELS_CMD_PRLO:
6457  els_data->subcategory = LPFC_EVENT_PRLO_RCV;
6458  break;
6459  case ELS_CMD_ADISC:
6460  els_data->subcategory = LPFC_EVENT_ADISC_RCV;
6461  break;
6462  case ELS_CMD_LOGO:
6463  els_data->subcategory = LPFC_EVENT_LOGO_RCV;
6464  /* Copy the WWPN in the LOGO payload */
6465  memcpy(logo_data->logo_wwpn, &payload[2],
6466  sizeof(struct lpfc_name));
6467  break;
6468  default:
6469  kfree(els_data);
6470  return;
6471  }
6472  memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
6473  memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
6474  if (*payload == ELS_CMD_LOGO) {
6475   fc_host_post_vendor_event(shost,
6476    fc_get_event_number(),
6477    sizeof(struct lpfc_logo_event),
6478    (char *)logo_data,
6479    LPFC_NL_VENDOR_ID);
6480  kfree(logo_data);
6481  } else {
6482   fc_host_post_vendor_event(shost,
6483    fc_get_event_number(),
6484    sizeof(struct lpfc_els_event_header),
6485    (char *)els_data,
6486    LPFC_NL_VENDOR_ID);
6487  kfree(els_data);
6488  }
6489 
6490  return;
6491 }
6492 
6493 
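/**
 * lpfc_els_unsol_buffer - Dispatch one unsolicited ELS frame to its handler
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to the ELS SLI ring.
 * @vport: pointer to the receiving virtual N_Port.
 * @elsiocb: pointer to the received iocb, with the ELS payload in context2.
 *
 * Summary of the routine below: it finds (or allocates) the node for the
 * source DID, then switches on the ELS command code to invoke the matching
 * lpfc_els_rcv_* routine or the discovery state machine; unknown commands
 * are rejected with LS_RJT.
 **/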
6508 static void
6509 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6510  struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
6511 {
6512  struct Scsi_Host *shost;
6513  struct lpfc_nodelist *ndlp;
6514  struct ls_rjt stat;
6515  uint32_t *payload;
6516  uint32_t cmd, did, newnode, rjt_err = 0;
6517  IOCB_t *icmd = &elsiocb->iocb;
6518 
6519  if (!vport || !(elsiocb->context2))
6520  goto dropit;
6521 
6522  newnode = 0;
6523  payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
6524  cmd = *payload;
6525  if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
6526  lpfc_post_buffer(phba, pring, 1);
6527 
6528  did = icmd->un.rcvels.remoteID;
6529  if (icmd->ulpStatus) {
6531  "RCV Unsol ELS: status:x%x/x%x did:x%x",
6532  icmd->ulpStatus, icmd->un.ulpWord[4], did);
6533  goto dropit;
6534  }
6535 
6536  /* Check to see if link went down during discovery */
6537  if (lpfc_els_chk_latt(vport))
6538  goto dropit;
6539 
6540  /* Ignore traffic received during vport shutdown. */
6541  if (vport->load_flag & FC_UNLOADING)
6542  goto dropit;
6543 
6544  /* If NPort discovery is delayed drop incoming ELS */
6545  if ((vport->fc_flag & FC_DISC_DELAYED) &&
6546  (cmd != ELS_CMD_PLOGI))
6547  goto dropit;
6548 
6549  ndlp = lpfc_findnode_did(vport, did);
6550  if (!ndlp) {
6551  /* Cannot find existing Fabric ndlp, so allocate a new one */
6552  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6553  if (!ndlp)
6554  goto dropit;
6555 
6556  lpfc_nlp_init(vport, ndlp, did);
6557  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6558  newnode = 1;
6559  if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6560  ndlp->nlp_type |= NLP_FABRIC;
6561  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6562   ndlp = lpfc_enable_node(vport, ndlp,
6563     NLP_STE_UNUSED_NODE);
6564  if (!ndlp)
6565  goto dropit;
6566  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6567  newnode = 1;
6568  if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6569  ndlp->nlp_type |= NLP_FABRIC;
6570  } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
6571  /* This is similar to the new node path */
6572  ndlp = lpfc_nlp_get(ndlp);
6573  if (!ndlp)
6574  goto dropit;
6575  lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6576  newnode = 1;
6577  }
6578 
6579  phba->fc_stat.elsRcvFrame++;
6580 
6581  elsiocb->context1 = lpfc_nlp_get(ndlp);
6582  elsiocb->vport = vport;
6583 
6584  if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
6585  cmd &= ELS_CMD_MASK;
6586  }
6587  /* ELS command <elsCmd> received from NPORT <did> */
6588  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6589  "0112 ELS command x%x received from NPORT x%x "
6590  "Data: x%x\n", cmd, did, vport->port_state);
6591  switch (cmd) {
6592  case ELS_CMD_PLOGI:
6594  "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
6595  did, vport->port_state, ndlp->nlp_flag);
6596 
6597  phba->fc_stat.elsRcvPLOGI++;
6598  ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6599 
6600  lpfc_send_els_event(vport, ndlp, payload);
6601 
6602  /* If Nport discovery is delayed, reject PLOGIs */
6603  if (vport->fc_flag & FC_DISC_DELAYED) {
6604  rjt_err = LSRJT_UNABLE_TPC;
6605  break;
6606  }
6607  if (vport->port_state < LPFC_DISC_AUTH) {
6608  if (!(phba->pport->fc_flag & FC_PT2PT) ||
6609  (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
6610  rjt_err = LSRJT_UNABLE_TPC;
6611  break;
6612  }
6613  /* We get here, and drop thru, if we are PT2PT with
6614  * another NPort and the other side has initiated
6615  * the PLOGI before responding to our FLOGI.
6616  */
6617  }
6618 
6619  shost = lpfc_shost_from_vport(vport);
6620  spin_lock_irq(shost->host_lock);
6621  ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6622  spin_unlock_irq(shost->host_lock);
6623 
6624  lpfc_disc_state_machine(vport, ndlp, elsiocb,
6625      NLP_EVT_RCV_PLOGI);
6626 
6627  break;
6628  case ELS_CMD_FLOGI:
6630  "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
6631  did, vport->port_state, ndlp->nlp_flag);
6632 
6633  phba->fc_stat.elsRcvFLOGI++;
6634  lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
6635  if (newnode)
6636  lpfc_nlp_put(ndlp);
6637  break;
6638  case ELS_CMD_LOGO:
6640  "RCV LOGO: did:x%x/ste:x%x flg:x%x",
6641  did, vport->port_state, ndlp->nlp_flag);
6642 
6643  phba->fc_stat.elsRcvLOGO++;
6644  lpfc_send_els_event(vport, ndlp, payload);
6645  if (vport->port_state < LPFC_DISC_AUTH) {
6646  rjt_err = LSRJT_UNABLE_TPC;
6647  break;
6648  }
6649  lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
6650  break;
6651  case ELS_CMD_PRLO:
6653  "RCV PRLO: did:x%x/ste:x%x flg:x%x",
6654  did, vport->port_state, ndlp->nlp_flag);
6655 
6656  phba->fc_stat.elsRcvPRLO++;
6657  lpfc_send_els_event(vport, ndlp, payload);
6658  if (vport->port_state < LPFC_DISC_AUTH) {
6659  rjt_err = LSRJT_UNABLE_TPC;
6660  break;
6661  }
6662  lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
6663  break;
6664  case ELS_CMD_RSCN:
6665  phba->fc_stat.elsRcvRSCN++;
6666  lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
6667  if (newnode)
6668  lpfc_nlp_put(ndlp);
6669  break;
6670  case ELS_CMD_ADISC:
6672  "RCV ADISC: did:x%x/ste:x%x flg:x%x",
6673  did, vport->port_state, ndlp->nlp_flag);
6674 
6675  lpfc_send_els_event(vport, ndlp, payload);
6676  phba->fc_stat.elsRcvADISC++;
6677  if (vport->port_state < LPFC_DISC_AUTH) {
6678  rjt_err = LSRJT_UNABLE_TPC;
6679  break;
6680  }
6681   lpfc_disc_state_machine(vport, ndlp, elsiocb,
6682     NLP_EVT_RCV_ADISC);
6683  break;
6684  case ELS_CMD_PDISC:
6686  "RCV PDISC: did:x%x/ste:x%x flg:x%x",
6687  did, vport->port_state, ndlp->nlp_flag);
6688 
6689  phba->fc_stat.elsRcvPDISC++;
6690  if (vport->port_state < LPFC_DISC_AUTH) {
6691  rjt_err = LSRJT_UNABLE_TPC;
6692  break;
6693  }
6694   lpfc_disc_state_machine(vport, ndlp, elsiocb,
6695     NLP_EVT_RCV_PDISC);
6696  break;
6697  case ELS_CMD_FARPR:
6699  "RCV FARPR: did:x%x/ste:x%x flg:x%x",
6700  did, vport->port_state, ndlp->nlp_flag);
6701 
6702  phba->fc_stat.elsRcvFARPR++;
6703  lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
6704  break;
6705  case ELS_CMD_FARP:
6707  "RCV FARP: did:x%x/ste:x%x flg:x%x",
6708  did, vport->port_state, ndlp->nlp_flag);
6709 
6710  phba->fc_stat.elsRcvFARP++;
6711  lpfc_els_rcv_farp(vport, elsiocb, ndlp);
6712  break;
6713  case ELS_CMD_FAN:
6715  "RCV FAN: did:x%x/ste:x%x flg:x%x",
6716  did, vport->port_state, ndlp->nlp_flag);
6717 
6718  phba->fc_stat.elsRcvFAN++;
6719  lpfc_els_rcv_fan(vport, elsiocb, ndlp);
6720  break;
6721  case ELS_CMD_PRLI:
6723  "RCV PRLI: did:x%x/ste:x%x flg:x%x",
6724  did, vport->port_state, ndlp->nlp_flag);
6725 
6726  phba->fc_stat.elsRcvPRLI++;
6727  if (vport->port_state < LPFC_DISC_AUTH) {
6728  rjt_err = LSRJT_UNABLE_TPC;
6729  break;
6730  }
6731  lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
6732  break;
6733  case ELS_CMD_LIRR:
6735  "RCV LIRR: did:x%x/ste:x%x flg:x%x",
6736  did, vport->port_state, ndlp->nlp_flag);
6737 
6738  phba->fc_stat.elsRcvLIRR++;
6739  lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
6740  if (newnode)
6741  lpfc_nlp_put(ndlp);
6742  break;
6743  case ELS_CMD_RLS:
6745  "RCV RLS: did:x%x/ste:x%x flg:x%x",
6746  did, vport->port_state, ndlp->nlp_flag);
6747 
6748  phba->fc_stat.elsRcvRLS++;
6749  lpfc_els_rcv_rls(vport, elsiocb, ndlp);
6750  if (newnode)
6751  lpfc_nlp_put(ndlp);
6752  break;
6753  case ELS_CMD_RPS:
6755  "RCV RPS: did:x%x/ste:x%x flg:x%x",
6756  did, vport->port_state, ndlp->nlp_flag);
6757 
6758  phba->fc_stat.elsRcvRPS++;
6759  lpfc_els_rcv_rps(vport, elsiocb, ndlp);
6760  if (newnode)
6761  lpfc_nlp_put(ndlp);
6762  break;
6763  case ELS_CMD_RPL:
6765  "RCV RPL: did:x%x/ste:x%x flg:x%x",
6766  did, vport->port_state, ndlp->nlp_flag);
6767 
6768  phba->fc_stat.elsRcvRPL++;
6769  lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
6770  if (newnode)
6771  lpfc_nlp_put(ndlp);
6772  break;
6773  case ELS_CMD_RNID:
6775  "RCV RNID: did:x%x/ste:x%x flg:x%x",
6776  did, vport->port_state, ndlp->nlp_flag);
6777 
6778  phba->fc_stat.elsRcvRNID++;
6779  lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
6780  if (newnode)
6781  lpfc_nlp_put(ndlp);
6782  break;
6783  case ELS_CMD_RTV:
6785  "RCV RTV: did:x%x/ste:x%x flg:x%x",
6786  did, vport->port_state, ndlp->nlp_flag);
6787  phba->fc_stat.elsRcvRTV++;
6788  lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
6789  if (newnode)
6790  lpfc_nlp_put(ndlp);
6791  break;
6792  case ELS_CMD_RRQ:
6794  "RCV RRQ: did:x%x/ste:x%x flg:x%x",
6795  did, vport->port_state, ndlp->nlp_flag);
6796 
6797  phba->fc_stat.elsRcvRRQ++;
6798  lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
6799  if (newnode)
6800  lpfc_nlp_put(ndlp);
6801  break;
6802  case ELS_CMD_ECHO:
6804  "RCV ECHO: did:x%x/ste:x%x flg:x%x",
6805  did, vport->port_state, ndlp->nlp_flag);
6806 
6807  phba->fc_stat.elsRcvECHO++;
6808  lpfc_els_rcv_echo(vport, elsiocb, ndlp);
6809  if (newnode)
6810  lpfc_nlp_put(ndlp);
6811  break;
6812  default:
6814  "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
6815  cmd, did, vport->port_state);
6816 
6817  /* Unsupported ELS command, reject */
6818  rjt_err = LSRJT_CMD_UNSUPPORTED;
6819 
6820  /* Unknown ELS command <elsCmd> received from NPORT <did> */
6821  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6822  "0115 Unknown ELS command x%x "
6823  "received from NPORT x%x\n", cmd, did);
6824  if (newnode)
6825  lpfc_nlp_put(ndlp);
6826  break;
6827  }
6828 
6829  /* check if need to LS_RJT received ELS cmd */
6830  if (rjt_err) {
6831  memset(&stat, 0, sizeof(stat));
6832  stat.un.b.lsRjtRsnCode = rjt_err;
6833  stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
6834  lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
6835  NULL);
6836  }
6837 
6838  lpfc_nlp_put(elsiocb->context1);
6839  elsiocb->context1 = NULL;
6840  return;
6841 
6842 dropit:
6843  if (vport && !(vport->load_flag & FC_UNLOADING))
6844  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6845  "0111 Dropping received ELS cmd "
6846  "Data: x%x x%x x%x\n",
6847  icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
6848  phba->fc_stat.elsRcvDrop++;
6849 }
6850 
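/**
 * lpfc_els_unsol_event - Entry point for unsolicited ELS ring events
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to the ELS SLI ring.
 * @elsiocb: pointer to the received iocb.
 *
 * Summary of the routine below: it resolves the target vport from the vpi
 * (for NPIV), maps the receive buffers, and hands each BDE to
 * lpfc_els_unsol_buffer() for processing, freeing any buffer the handlers
 * leave behind in context2.
 **/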
6863 void
6864 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6865  struct lpfc_iocbq *elsiocb)
6866 {
6867  struct lpfc_vport *vport = phba->pport;
6868  IOCB_t *icmd = &elsiocb->iocb;
6869  dma_addr_t paddr;
6870  struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
6871  struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
6872 
6873  elsiocb->context1 = NULL;
6874  elsiocb->context2 = NULL;
6875  elsiocb->context3 = NULL;
6876 
6877  if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
6878   lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
6879  } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
6880      (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
6881      IOERR_RCV_BUFFER_WAITING) {
6882  phba->fc_stat.NoRcvBuf++;
6883  /* Not enough posted buffers; Try posting more buffers */
6884  if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
6885  lpfc_post_buffer(phba, pring, 0);
6886  return;
6887  }
6888 
6889  if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6890  (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
6891  icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6892  if (icmd->unsli3.rcvsli3.vpi == 0xffff)
6893  vport = phba->pport;
6894  else
6895  vport = lpfc_find_vport_by_vpid(phba,
6896  icmd->unsli3.rcvsli3.vpi);
6897  }
6898 
6899  /* If there are no BDEs associated
6900  * with this IOCB, there is nothing to do.
6901  */
6902  if (icmd->ulpBdeCount == 0)
6903  return;
6904 
6905  /* The type of ELS cmd is the first 32-bit word
6906   * in the packet
6907   */
6908  if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
6909  elsiocb->context2 = bdeBuf1;
6910  } else {
6911  paddr = getPaddr(icmd->un.cont64[0].addrHigh,
6912  icmd->un.cont64[0].addrLow);
6913  elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
6914  paddr);
6915  }
6916 
6917  lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6918  /*
6919  * The different unsolicited event handlers would tell us
6920  * if they are done with "mp" by setting context2 to NULL.
6921  */
6922  if (elsiocb->context2) {
6923  lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
6924  elsiocb->context2 = NULL;
6925  }
6926 
6927  /* RCV_ELS64_CX provides for 2 BDEs - process the 2nd if included */
6928  if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
6929  icmd->ulpBdeCount == 2) {
6930  elsiocb->context2 = bdeBuf2;
6931  lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6932  /* free mp if we are done with it */
6933  if (elsiocb->context2) {
6934  lpfc_in_buf_free(phba, elsiocb->context2);
6935  elsiocb->context2 = NULL;
6936  }
6937  }
6938 }
6939 
6953 void
6954 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6955 {
6956  struct lpfc_nodelist *ndlp, *ndlp_fdmi;
6957  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6958 
6959  /*
6960  * If the lpfc_delay_discovery module parameter is set, the Clean Address
6961  * bit is cleared, and the FC fabric parameters have changed, delay
6962  * FC NPort discovery.
6963  */
6964  spin_lock_irq(shost->host_lock);
6965  if (vport->fc_flag & FC_DISC_DELAYED) {
6966  spin_unlock_irq(shost->host_lock);
6967  mod_timer(&vport->delayed_disc_tmo,
6968  jiffies + HZ * phba->fc_ratov);
6969  return;
6970  }
6971  spin_unlock_irq(shost->host_lock);
6972 
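 /* Locate the NameServer node; allocate or re-enable it as needed
  * before issuing the PLOGI to it.
  */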
6973  ndlp = lpfc_findnode_did(vport, NameServer_DID);
6974  if (!ndlp) {
6975  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6976  if (!ndlp) {
6977  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6978  lpfc_disc_start(vport);
6979  return;
6980   }
6981   lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6982   lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6983  "0251 NameServer login: no memory\n");
6984  return;
6985  }
6986  lpfc_nlp_init(vport, ndlp, NameServer_DID);
6987  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6988  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6989  if (!ndlp) {
6990  if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6991  lpfc_disc_start(vport);
6992  return;
6993   }
6994   lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6995   lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6996  "0348 NameServer login: node freed\n");
6997  return;
6998  }
6999  }
7000  ndlp->nlp_type |= NLP_FABRIC;
7001 
7002  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7003 
7004  if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
7005   lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7006   lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7007  "0252 Cannot issue NameServer login\n");
7008  return;
7009  }
7010 
7011  if (vport->cfg_fdmi_on) {
7012  /* If this is the first time, allocate an ndlp and initialize
7013  * it. Otherwise, make sure the node is enabled and then do the
7014  * login.
7015  */
7016  ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
7017  if (!ndlp_fdmi) {
7018  ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
7019  GFP_KERNEL);
7020  if (ndlp_fdmi) {
7021  lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
7022  ndlp_fdmi->nlp_type |= NLP_FABRIC;
7023  } else
7024  return;
7025  }
7026  if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
7027  ndlp_fdmi = lpfc_enable_node(vport,
7028       ndlp_fdmi,
7029       NLP_STE_NPR_NODE);
7030 
7031   if (ndlp_fdmi) {
7032    lpfc_nlp_set_state(vport, ndlp_fdmi,
7033     NLP_STE_PLOGI_ISSUE);
7034    lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
7035  }
7036  }
7037 }
7038 
7051 static void
7052 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7053 {
7054  struct lpfc_vport *vport = pmb->vport;
7055  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7056  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
7057  MAILBOX_t *mb = &pmb->u.mb;
7058  int rc;
7059 
7060  spin_lock_irq(shost->host_lock);
7061  vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
7062  spin_unlock_irq(shost->host_lock);
7063 
7064  if (mb->mbxStatus) {
7065   lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
7066    "0915 Register VPI failed : Status: x%x"
7067  " upd bit: x%x \n", mb->mbxStatus,
7068  mb->un.varRegVpi.upd);
7069  if (phba->sli_rev == LPFC_SLI_REV4 &&
7070  mb->un.varRegVpi.upd)
7071   goto mbox_err_exit;
7072 
7073  switch (mb->mbxStatus) {
7074  case 0x11: /* unsupported feature */
7075  case 0x9603: /* max_vpi exceeded */
7076  case 0x9602: /* Link event since CLEAR_LA */
7077   /* giving up on vport registration */
7078   lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7079   spin_lock_irq(shost->host_lock);
7080  vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
7081  spin_unlock_irq(shost->host_lock);
7082  lpfc_can_disctmo(vport);
7083  break;
7084   /* If reg_vpi fails with invalid VPI status, re-init VPI */
7085  case 0x20:
7086  spin_lock_irq(shost->host_lock);
7087  vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7088  spin_unlock_irq(shost->host_lock);
7089  lpfc_init_vpi(phba, pmb, vport->vpi);
7090    pmb->vport = vport;
7091    pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
7092    rc = lpfc_sli_issue_mbox(phba, pmb,
7093  MBX_NOWAIT);
7094  if (rc == MBX_NOT_FINISHED) {
7095  lpfc_printf_vlog(vport,
7096  KERN_ERR, LOG_MBOX,
7097  "2732 Failed to issue INIT_VPI"
7098  " mailbox command\n");
7099  } else {
7100  lpfc_nlp_put(ndlp);
7101  return;
7102  }
7103 
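   /* If the INIT_VPI mailbox above could not be issued,
    * control falls through to the default recovery path.
    */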
7104  default:
7105  /* Try to recover from this error */
7106  if (phba->sli_rev == LPFC_SLI_REV4)
7107  lpfc_sli4_unreg_all_rpis(vport);
7108  lpfc_mbx_unreg_vpi(vport);
7109  spin_lock_irq(shost->host_lock);
7110  vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7111  spin_unlock_irq(shost->host_lock);
7112  if (vport->port_type == LPFC_PHYSICAL_PORT
7113  && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
7114  lpfc_issue_init_vfi(vport);
7115  else
7116  lpfc_initial_fdisc(vport);
7117  break;
7118  }
7119  } else {
7120  spin_lock_irq(shost->host_lock);
7121  vport->vpi_state |= LPFC_VPI_REGISTERED;
7122  spin_unlock_irq(shost->host_lock);
7123  if (vport == phba->pport) {
7124    if (phba->sli_rev < LPFC_SLI_REV4)
7125     lpfc_issue_fabric_reglogin(vport);
7126    else {
7127  /*
7128  * If the physical port is instantiated using
7129  * FDISC, do not start vport discovery.
7130  */
7131  if (vport->port_state != LPFC_FDISC)
7132  lpfc_start_fdiscs(phba);
7133  lpfc_do_scr_ns_plogi(phba, vport);
7134  }
7135  } else
7136  lpfc_do_scr_ns_plogi(phba, vport);
7137  }
7138 mbox_err_exit:
7139  /* Now, we decrement the ndlp reference count held for this
7140  * callback function
7141  */
7142  lpfc_nlp_put(ndlp);
7143 
7144  mempool_free(pmb, phba->mbox_mem_pool);
7145  return;
7146 }
7147 
7157 void
7158 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
7159  struct lpfc_nodelist *ndlp)
7160 {
7161  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7162  LPFC_MBOXQ_t *mbox;
7163 
7164  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7165  if (mbox) {
7166  lpfc_reg_vpi(vport, mbox);
7167  mbox->vport = vport;
7168  mbox->context2 = lpfc_nlp_get(ndlp);
7169  mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
7170  if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7171  == MBX_NOT_FINISHED) {
7172   /* Mailbox command failed; decrement the ndlp
7173    * reference count held for this command
7174  */
7175  lpfc_nlp_put(ndlp);
7176  mempool_free(mbox, phba->mbox_mem_pool);
7177 
7178    lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
7179     "0253 Register VPI: Can't send mbox\n");
7180  goto mbox_err_exit;
7181  }
7182  } else {
7183   lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
7184    "0254 Register VPI: no memory\n");
7185  goto mbox_err_exit;
7186  }
7187  return;
7188 
7189 mbox_err_exit:
7190  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7191  spin_lock_irq(shost->host_lock);
7192  vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
7193  spin_unlock_irq(shost->host_lock);
7194  return;
7195 }
7196 
7203 void
7204 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
7205 {
7206  struct lpfc_vport **vports;
7207  struct lpfc_nodelist *ndlp;
7208  uint32_t link_state;
7209  int i;
7210 
7211  /* Treat this failure as linkdown for all vports */
7212  link_state = phba->link_state;
7213  lpfc_linkdown(phba);
7214  phba->link_state = link_state;
7215 
7216  vports = lpfc_create_vport_work_array(phba);
7217 
7218  if (vports) {
7219  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
7220  ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
7221  if (ndlp)
7222  lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
7223  lpfc_els_flush_cmd(vports[i]);
7224  }
7225  lpfc_destroy_vport_work_array(phba, vports);
7226  }
7227 }
7228 
7237 void
7238 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7239 {
7240  struct lpfc_nodelist *ndlp;
7241  struct Scsi_Host *shost;
7242 
7243  /* Cancel all vports' retry delay timers */
7244  lpfc_cancel_all_vport_retry_delay_timer(phba);
7245 
7246  /* If the fabric requires FLOGI, re-instantiate the physical login */
7247  ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
7248  if (!ndlp)
7249  return;
7250 
7251  shost = lpfc_shost_from_vport(phba->pport);
7252  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
7253  spin_lock_irq(shost->host_lock);
7254  ndlp->nlp_flag |= NLP_DELAY_TMO;
7255  spin_unlock_irq(shost->host_lock);
7256  ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7257  phba->pport->port_state = LPFC_FLOGI;
7258  return;
7259 }
7260 
7270 static int
7271 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7272  struct lpfc_iocbq *cmdiocb,
7273  struct lpfc_iocbq *rspiocb)
7274 {
7275 
7276  if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7277  (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7278  return 0;
7279  else
7280  return 1;
7281 }
7282 
7303 static void
7304 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7305  struct lpfc_iocbq *rspiocb)
7306 {
7307  struct lpfc_vport *vport = cmdiocb->vport;
7308  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7309  struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7310  struct lpfc_nodelist *np;
7311  struct lpfc_nodelist *next_np;
7312  IOCB_t *irsp = &rspiocb->iocb;
7313  struct lpfc_iocbq *piocb;
7314  struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7315  struct serv_parm *sp;
7316  uint8_t fabric_param_changed;
7317 
7318  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7319  "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
7320  irsp->ulpStatus, irsp->un.ulpWord[4],
7321  vport->fc_prevDID);
7322  /* Since all FDISCs are being single threaded, we
7323  * must reset the discovery timer for ALL vports
7324  * waiting to send FDISC when one completes.
7325  */
7326  list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
7327  lpfc_set_disctmo(piocb->vport);
7328  }
7329 
7330  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7331   "FDISC cmpl: status:x%x/x%x prevdid:x%x",
7332  irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
7333 
7334  if (irsp->ulpStatus) {
7335 
7336   if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
7337    lpfc_retry_pport_discovery(phba);
7338    goto out;
7339  }
7340 
7341  /* Check for retry */
7342  if (lpfc_els_retry(phba, cmdiocb, rspiocb))
7343  goto out;
7344  /* FDISC failed */
7345  lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7346  "0126 FDISC failed. (x%x/x%x)\n",
7347  irsp->ulpStatus, irsp->un.ulpWord[4]);
7348  goto fdisc_failed;
7349  }
7350  spin_lock_irq(shost->host_lock);
7351  vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
7352  vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
7353  vport->fc_flag |= FC_FABRIC;
7354  if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
7355  vport->fc_flag |= FC_PUBLIC_LOOP;
7356  spin_unlock_irq(shost->host_lock);
7357 
7358  vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7359  lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
7360  prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7361  sp = prsp->virt + sizeof(uint32_t);
7362  fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7363  memcpy(&vport->fabric_portname, &sp->portName,
7364  sizeof(struct lpfc_name));
7365  memcpy(&vport->fabric_nodename, &sp->nodeName,
7366  sizeof(struct lpfc_name));
7367  if (fabric_param_changed &&
7368  !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7369  /* If our NportID changed, we need to ensure all
7370  * remaining NPORTs get unreg_login'ed so we can
7371  * issue unreg_vpi.
7372  */
7373  list_for_each_entry_safe(np, next_np,
7374  &vport->fc_nodes, nlp_listp) {
7375  if (!NLP_CHK_NODE_ACT(ndlp) ||
7376  (np->nlp_state != NLP_STE_NPR_NODE) ||
7377  !(np->nlp_flag & NLP_NPR_ADISC))
7378  continue;
7379  spin_lock_irq(shost->host_lock);
7380  np->nlp_flag &= ~NLP_NPR_ADISC;
7381  spin_unlock_irq(shost->host_lock);
7382  lpfc_unreg_rpi(vport, np);
7383   }
7384   lpfc_cleanup_pending_mbox(vport);
7385 
7386  if (phba->sli_rev == LPFC_SLI_REV4)
7387  lpfc_sli4_unreg_all_rpis(vport);
7388 
7389  lpfc_mbx_unreg_vpi(vport);
7390  spin_lock_irq(shost->host_lock);
7391  vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7392  if (phba->sli_rev == LPFC_SLI_REV4)
7393  vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
7394  else
7395  vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
7396  spin_unlock_irq(shost->host_lock);
7397  } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
7398  !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7399  /*
7400  * Driver needs to re-reg VPI in order for f/w
7401  * to update the MAC address.
7402  */
7403  lpfc_register_new_vport(phba, vport, ndlp);
7404  goto out;
7405  }
7406 
7407  if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
7408  lpfc_issue_init_vpi(vport);
7409  else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
7410  lpfc_register_new_vport(phba, vport, ndlp);
7411  else
7412  lpfc_do_scr_ns_plogi(phba, vport);
7413  goto out;
7414 fdisc_failed:
7415  lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7416  /* Cancel discovery timer */
7417  lpfc_can_disctmo(vport);
7418  lpfc_nlp_put(ndlp);
7419 out:
7420  lpfc_els_free_iocb(phba, cmdiocb);
7421 }
7422 
7443 static int
7444 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7445  uint8_t retry)
7446 {
7447  struct lpfc_hba *phba = vport->phba;
7448  IOCB_t *icmd;
7449  struct lpfc_iocbq *elsiocb;
7450  struct serv_parm *sp;
7451  uint8_t *pcmd;
7452  uint16_t cmdsize;
7453  int did = ndlp->nlp_DID;
7454  int rc;
7455 
7456  vport->port_state = LPFC_FDISC;
7457  vport->fc_myDID = 0;
7458  cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
7459  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
7460  ELS_CMD_FDISC);
7461  if (!elsiocb) {
7462   lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7463   lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7464  "0255 Issue FDISC: no IOCB\n");
7465  return 1;
7466  }
7467 
7468  icmd = &elsiocb->iocb;
7469  icmd->un.elsreq64.myID = 0;
7470  icmd->un.elsreq64.fl = 1;
7471 
7472  /*
7473  * SLI3 ports require a different context type value than SLI4.
7474  * Catch SLI3 ports here and override the prep.
7475  */
7476  if (phba->sli_rev == LPFC_SLI_REV3) {
7477  icmd->ulpCt_h = 1;
7478  icmd->ulpCt_l = 0;
7479  }
7480 
7481  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7482  *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7483  pcmd += sizeof(uint32_t); /* CSP Word 1 */
7484  memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7485  sp = (struct serv_parm *) pcmd;
7486  /* Setup CSPs accordingly for Fabric */
7487  sp->cmn.e_d_tov = 0;
7488  sp->cmn.w2.r_a_tov = 0;
7489  sp->cmn.virtual_fabric_support = 0;
7490  sp->cls1.classValid = 0;
7491  sp->cls2.seqDelivery = 1;
7492  sp->cls3.seqDelivery = 1;
7493 
7494  pcmd += sizeof(uint32_t); /* CSP Word 2 */
7495  pcmd += sizeof(uint32_t); /* CSP Word 3 */
7496  pcmd += sizeof(uint32_t); /* CSP Word 4 */
7497  pcmd += sizeof(uint32_t); /* Port Name */
7498  memcpy(pcmd, &vport->fc_portname, 8);
7499  pcmd += sizeof(uint32_t); /* Node Name */
7500  pcmd += sizeof(uint32_t); /* Node Name */
7501  memcpy(pcmd, &vport->fc_nodename, 8);
7502 
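 /* At this point the payload is the FDISC command word followed by the
  * physical port's service parameters, with the WWPN and WWNN fields
  * overwritten by this vport's port and node names.
  */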
7503  lpfc_set_disctmo(vport);
7504 
7505  phba->fc_stat.elsXmitFDISC++;
7506  elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7507 
7508  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7509   "Issue FDISC: did:x%x",
7510  did, 0, 0);
7511 
7512  rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7513  if (rc == IOCB_ERROR) {
7514   lpfc_els_free_iocb(phba, elsiocb);
7515   lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7516   lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7517  "0256 Issue FDISC: Cannot send IOCB\n");
7518  return 1;
7519  }
7520  lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
7521  return 0;
7522 }
7523 
7538 static void
7539 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7540  struct lpfc_iocbq *rspiocb)
7541 {
7542  struct lpfc_vport *vport = cmdiocb->vport;
7543  IOCB_t *irsp;
7544  struct lpfc_nodelist *ndlp;
7545  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7546 
7547  ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
7548  irsp = &rspiocb->iocb;
7549  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7550   "LOGO npiv cmpl: status:x%x/x%x did:x%x",
7551  irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
7552 
7553  lpfc_els_free_iocb(phba, cmdiocb);
7554  vport->unreg_vpi_cmpl = VPORT_ERROR;
7555 
7556  /* Trigger the release of the ndlp after logo */
7557  lpfc_nlp_put(ndlp);
7558 
7559  /* NPIV LOGO completes to NPort <nlp_DID> */
7560  lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7561  "2928 NPIV LOGO completes to NPort x%x "
7562  "Data: x%x x%x x%x x%x\n",
7563  ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
7564  irsp->ulpTimeout, vport->num_disc_nodes);
7565 
7566  if (irsp->ulpStatus == IOSTAT_SUCCESS) {
7567  spin_lock_irq(shost->host_lock);
7568  vport->fc_flag &= ~FC_FABRIC;
7569  spin_unlock_irq(shost->host_lock);
7570  }
7571 }
7572 
7589 int
7590 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7591 {
7592  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7593  struct lpfc_hba *phba = vport->phba;
7594  IOCB_t *icmd;
7595  struct lpfc_iocbq *elsiocb;
7596  uint8_t *pcmd;
7597  uint16_t cmdsize;
7598 
7599  cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
7600  elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
7601  ELS_CMD_LOGO);
7602  if (!elsiocb)
7603  return 1;
7604 
7605  icmd = &elsiocb->iocb;
7606  pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7607  *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
7608  pcmd += sizeof(uint32_t);
7609 
7610  /* Fill in LOGO payload */
7611  *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
7612  pcmd += sizeof(uint32_t);
7613  memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
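 /* The payload just built is the LOGO command word, this vport's
  * N_Port ID swapped to wire (big-endian) order, then its WWPN.
  */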
7614 
7615  lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7616   "Issue LOGO npiv did:x%x flg:x%x",
7617  ndlp->nlp_DID, ndlp->nlp_flag, 0);
7618 
7619  elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
7620  spin_lock_irq(shost->host_lock);
7621  ndlp->nlp_flag |= NLP_LOGO_SND;
7622  spin_unlock_irq(shost->host_lock);
7623  if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7624  IOCB_ERROR) {
7625  spin_lock_irq(shost->host_lock);
7626  ndlp->nlp_flag &= ~NLP_LOGO_SND;
7627  spin_unlock_irq(shost->host_lock);
7628  lpfc_els_free_iocb(phba, elsiocb);
7629  return 1;
7630  }
7631  return 0;
7632 }
7633 
7645 void
7646 lpfc_fabric_block_timeout(unsigned long ptr)
7647 {
7648  struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
7649  unsigned long iflags;
7650  uint32_t tmo_posted;
7651 
7652  spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
7653  tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
7654  if (!tmo_posted)
7655  phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
7656  spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
7657 
7658  if (!tmo_posted)
7659  lpfc_worker_wake_up(phba);
7660  return;
7661 }
7662 
7673 static void
7674 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
7675 {
7676  struct lpfc_iocbq *iocb;
7677  unsigned long iflags;
7678  int ret;
7679  IOCB_t *cmd;
7680 
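 /* Pull one queued fabric ELS at a time off fabric_iocb_list and issue
  * it; fabric_iocb_count serves as the single-outstanding-command token.
  */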
7681 repeat:
7682  iocb = NULL;
7683  spin_lock_irqsave(&phba->hbalock, iflags);
7684  /* Post any pending iocb to the SLI layer */
7685  if (atomic_read(&phba->fabric_iocb_count) == 0) {
7686  list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
7687  list);
7688  if (iocb)
7689  /* Increment fabric iocb count to hold the position */
7690  atomic_inc(&phba->fabric_iocb_count);
7691  }
7692  spin_unlock_irqrestore(&phba->hbalock, iflags);
7693  if (iocb) {
7694  iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7695  iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7696  iocb->iocb_flag |= LPFC_IO_FABRIC;
7697 
7698   lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7699    "Fabric sched1: ste:x%x",
7700  iocb->vport->port_state, 0, 0);
7701 
7702  ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7703 
7704  if (ret == IOCB_ERROR) {
7705  iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7706  iocb->fabric_iocb_cmpl = NULL;
7707  iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7708    cmd = &iocb->iocb;
7709    cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
7710    cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
7711  iocb->iocb_cmpl(phba, iocb, iocb);
7712 
7713  atomic_dec(&phba->fabric_iocb_count);
7714  goto repeat;
7715  }
7716  }
7717 
7718  return;
7719 }
7720 
7730 void
7731 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
7732 {
7733  clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7734 
7735  lpfc_resume_fabric_iocbs(phba);
7736  return;
7737 }
7738 
7748 static void
7749 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7750 {
7751  int blocked;
7752 
7753  blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7754  /* Start a timer to unblock fabric iocbs after 100ms */
7755  if (!blocked)
7756  mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
7757 
7758  return;
7759 }
7760 
7774 static void
7775 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7776  struct lpfc_iocbq *rspiocb)
7777 {
7778  struct ls_rjt stat;
7779 
7780  if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
7781  BUG();
7782 
7783  switch (rspiocb->iocb.ulpStatus) {
7784  case IOSTAT_NPORT_RJT:
7785  case IOSTAT_FABRIC_RJT:
7786  if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
7787  lpfc_block_fabric_iocbs(phba);
7788  }
7789  break;
7790 
7791  case IOSTAT_NPORT_BSY:
7792  case IOSTAT_FABRIC_BSY:
7793  lpfc_block_fabric_iocbs(phba);
7794  break;
7795 
7796  case IOSTAT_LS_RJT:
7797  stat.un.lsRjtError =
7798  be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
7799  if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
7800  (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
7801  lpfc_block_fabric_iocbs(phba);
7802  break;
7803  }
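 /* Busy or temporarily-unavailable responses from the fabric throttle
  * further fabric ELS commands until the fabric block timer expires.
  */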
7804 
7805  if (atomic_read(&phba->fabric_iocb_count) == 0)
7806  BUG();
7807 
7808  cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
7809  cmdiocb->fabric_iocb_cmpl = NULL;
7810  cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
7811  cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
7812 
7813  atomic_dec(&phba->fabric_iocb_count);
7814  if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7815  /* Post any pending iocbs to HBA */
7816  lpfc_resume_fabric_iocbs(phba);
7817  }
7818 }
7819 
7844 static int
7845 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
7846 {
7847  unsigned long iflags;
7848  int ready;
7849  int ret;
7850 
7851  if (atomic_read(&phba->fabric_iocb_count) > 1)
7852  BUG();
7853 
7854  spin_lock_irqsave(&phba->hbalock, iflags);
7855  ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
7856   !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7857 
7858  if (ready)
7859  /* Increment fabric iocb count to hold the position */
7860  atomic_inc(&phba->fabric_iocb_count);
7861  spin_unlock_irqrestore(&phba->hbalock, iflags);
7862  if (ready) {
7863  iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7864  iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7865  iocb->iocb_flag |= LPFC_IO_FABRIC;
7866 
7867   lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7868    "Fabric sched2: ste:x%x",
7869  iocb->vport->port_state, 0, 0);
7870 
7871  ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7872 
7873  if (ret == IOCB_ERROR) {
7874  iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7875  iocb->fabric_iocb_cmpl = NULL;
7876  iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7877  atomic_dec(&phba->fabric_iocb_count);
7878  }
7879  } else {
7880  spin_lock_irqsave(&phba->hbalock, iflags);
7881  list_add_tail(&iocb->list, &phba->fabric_iocb_list);
7882  spin_unlock_irqrestore(&phba->hbalock, iflags);
7883  ret = IOCB_SUCCESS;
7884  }
7885  return ret;
7886 }
7887 
7899 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
7900 {
7901  LIST_HEAD(completions);
7902  struct lpfc_hba *phba = vport->phba;
7903  struct lpfc_iocbq *tmp_iocb, *piocb;
7904 
7905  spin_lock_irq(&phba->hbalock);
7906  list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7907  list) {
7908 
7909  if (piocb->vport != vport)
7910  continue;
7911 
7912  list_move_tail(&piocb->list, &completions);
7913  }
7914  spin_unlock_irq(&phba->hbalock);
7915 
7916  /* Cancel all the IOCBs from the completions list */
7917  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7918   IOERR_SLI_ABORTED);
7919 }
7920 
7931 void
7932 lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
7933 {
7934  LIST_HEAD(completions);
7935  struct lpfc_hba *phba = ndlp->phba;
7936  struct lpfc_iocbq *tmp_iocb, *piocb;
7937  struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7938 
7939  spin_lock_irq(&phba->hbalock);
7940  list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7941  list) {
7942  if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
7943 
7944  list_move_tail(&piocb->list, &completions);
7945  }
7946  }
7947  spin_unlock_irq(&phba->hbalock);
7948 
7949  /* Cancel all the IOCBs from the completions list */
7950  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7951   IOERR_SLI_ABORTED);
7952 }
7953 
7964 void
7965 lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7966 {
7967  LIST_HEAD(completions);
7968 
7969  spin_lock_irq(&phba->hbalock);
7970  list_splice_init(&phba->fabric_iocb_list, &completions);
7971  spin_unlock_irq(&phba->hbalock);
7972 
7973  /* Cancel all the IOCBs from the completions list */
7974  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7975   IOERR_SLI_ABORTED);
7976 }
7977 
7985 void
7986 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
7987 {
7988  struct lpfc_hba *phba = vport->phba;
7989  struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7990  unsigned long iflag = 0;
7991 
7992  spin_lock_irqsave(&phba->hbalock, iflag);
7993  spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
7994  list_for_each_entry_safe(sglq_entry, sglq_next,
7995  &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7996  if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
7997  sglq_entry->ndlp = NULL;
7998  }
7999  spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8000  spin_unlock_irqrestore(&phba->hbalock, iflag);
8001  return;
8002 }
8003 
8012 void
8013 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
8014  struct sli4_wcqe_xri_aborted *axri)
8015 {
8016  uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
8017  uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
8018  uint16_t lxri = 0;
8019 
8020  struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8021  unsigned long iflag = 0;
8022  struct lpfc_nodelist *ndlp;
8023  struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8024 
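 /* Walk the aborted-ELS sgl list looking for the XRI reported in the
  * abort CQE; if found, return the sglq to the free list and record the
  * exchange as an active RRQ so the XRI is not immediately reused.
  */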
8025  spin_lock_irqsave(&phba->hbalock, iflag);
8026  spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
8027  list_for_each_entry_safe(sglq_entry, sglq_next,
8028  &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
8029  if (sglq_entry->sli4_xritag == xri) {
8030  list_del(&sglq_entry->list);
8031  ndlp = sglq_entry->ndlp;
8032  sglq_entry->ndlp = NULL;
8033  list_add_tail(&sglq_entry->list,
8034  &phba->sli4_hba.lpfc_sgl_list);
8035  sglq_entry->state = SGL_FREED;
8036  spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8037  spin_unlock_irqrestore(&phba->hbalock, iflag);
8038  lpfc_set_rrq_active(phba, ndlp,
8039  sglq_entry->sli4_lxritag,
8040  rxid, 1);
8041 
8042  /* Check if TXQ queue needs to be serviced */
8043  if (pring->txq_cnt)
8044  lpfc_worker_wake_up(phba);
8045  return;
8046  }
8047  }
8048  spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8049  lxri = lpfc_sli4_xri_inrange(phba, xri);
8050  if (lxri == NO_XRI) {
8051  spin_unlock_irqrestore(&phba->hbalock, iflag);
8052  return;
8053  }
8054  sglq_entry = __lpfc_get_active_sglq(phba, lxri);
8055  if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
8056  spin_unlock_irqrestore(&phba->hbalock, iflag);
8057  return;
8058  }
8059  sglq_entry->state = SGL_XRI_ABORTED;
8060  spin_unlock_irqrestore(&phba->hbalock, iflag);
8061  return;
8062 }
8063 
8064 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
8065  * @vport: pointer to virtual port object.
8066  * @ndlp: nodelist pointer for the impacted node.
8067  *
8068  * The driver calls this routine in response to an SLI4 XRI ABORT CQE
8069  * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
8070  * the driver is required to send a LOGO to the remote node before it
8071  * attempts to recover its login to the remote node.
8072  */
8073 void
8074 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8075  struct lpfc_nodelist *ndlp)
8076 {
8077  struct Scsi_Host *shost;
8078  struct lpfc_hba *phba;
8079  unsigned long flags = 0;
8080 
8081  shost = lpfc_shost_from_vport(vport);
8082  phba = vport->phba;
8083  if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8084  lpfc_printf_log(phba, KERN_INFO,
8085  LOG_SLI, "3093 No rport recovery needed. "
8086  "rport in state 0x%x\n", ndlp->nlp_state);
8087  return;
8088  }
8089  lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8090   "3094 Start rport recovery on shost id 0x%x "
8091  "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8092  "flags 0x%x\n",
8093  shost->host_no, ndlp->nlp_DID,
8094  vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8095  ndlp->nlp_flag);
8096  /*
8097  * The rport is not responding. Remove the FCP-2 flag to prevent
8098  * an ADISC in the follow-up recovery code.
8099  */
8100  spin_lock_irqsave(shost->host_lock, flags);
8101  ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8102  spin_unlock_irqrestore(shost->host_lock, flags);
8103  lpfc_issue_els_logo(vport, ndlp, 0);
8104  lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
8105 }
8106