Linux Kernel 3.7.1
lpfc_init.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for *
3  * Fibre Channel Host Bus Adapters. *
4  * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8  * *
9  * This program is free software; you can redistribute it and/or *
10  * modify it under the terms of version 2 of the GNU General *
11  * Public License as published by the Free Software Foundation. *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID. See the GNU General Public License for *
18  * more details, a copy of which can be found in the file COPYING *
19  * included with this package. *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/kthread.h>
29 #include <linux/pci.h>
30 #include <linux/spinlock.h>
31 #include <linux/ctype.h>
32 #include <linux/aer.h>
33 #include <linux/slab.h>
34 #include <linux/firmware.h>
35 #include <linux/miscdevice.h>
36 
37 #include <scsi/scsi.h>
38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_host.h>
40 #include <scsi/scsi_transport_fc.h>
41 
42 #include "lpfc_hw4.h"
43 #include "lpfc_hw.h"
44 #include "lpfc_sli.h"
45 #include "lpfc_sli4.h"
46 #include "lpfc_nl.h"
47 #include "lpfc_disc.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc.h"
50 #include "lpfc_logmsg.h"
51 #include "lpfc_crtn.h"
52 #include "lpfc_vport.h"
53 #include "lpfc_version.h"
54 
55 char *_dump_buf_data;
56 unsigned long _dump_buf_data_order;
57 char *_dump_buf_dif;
58 unsigned long _dump_buf_dif_order;
59 spinlock_t _dump_buf_lock;
60 
61 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
62 static int lpfc_post_rcv_buf(struct lpfc_hba *);
63 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
64 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
65 static int lpfc_setup_endian_order(struct lpfc_hba *);
66 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
67 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
68 static void lpfc_init_sgl_list(struct lpfc_hba *);
69 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
70 static void lpfc_free_active_sgl(struct lpfc_hba *);
71 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
72 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
73 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
74 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
75 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
76 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
77 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
78 
79 static struct scsi_transport_template *lpfc_transport_template = NULL;
80 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
81 static DEFINE_IDR(lpfc_hba_index);
82 
97 int
98 lpfc_config_port_prep(struct lpfc_hba *phba)
99 {
100  lpfc_vpd_t *vp = &phba->vpd;
101  int i = 0, rc;
102  LPFC_MBOXQ_t *pmb;
103  MAILBOX_t *mb;
104  char *lpfc_vpd_data = NULL;
105  uint16_t offset = 0;
106  static char licensed[56] =
107  "key unlock for use with gnu public licensed code only\0";
108  static int init_key = 1;
109 
110  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
111  if (!pmb) {
112  phba->link_state = LPFC_HBA_ERROR;
113  return -ENOMEM;
114  }
115 
116  mb = &pmb->u.mb;
117  phba->link_state = LPFC_INIT_MBX_CMDS;
118 
119  if (lpfc_is_LC_HBA(phba->pcidev->device)) {
120  if (init_key) {
121  uint32_t *ptext = (uint32_t *) licensed;
122 
123  for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
124  *ptext = cpu_to_be32(*ptext);
125  init_key = 0;
126  }
127 
128  lpfc_read_nv(phba, pmb);
129  memset((char*)mb->un.varRDnvp.rsvd3, 0,
130  sizeof (mb->un.varRDnvp.rsvd3));
131  memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
132  sizeof (licensed));
133 
134  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
135 
136  if (rc != MBX_SUCCESS) {
137  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
138  "0324 Config Port initialization "
139  "error, mbxCmd x%x READ_NVPARM, "
140  "mbxStatus x%x\n",
141  mb->mbxCommand, mb->mbxStatus);
142  mempool_free(pmb, phba->mbox_mem_pool);
143  return -ERESTART;
144  }
145  memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
146  sizeof(phba->wwnn));
147  memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
148  sizeof(phba->wwpn));
149  }
150 
151  phba->sli3_options = 0x0;
152 
153  /* Setup and issue mailbox READ REV command */
154  lpfc_read_rev(phba, pmb);
155  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
156  if (rc != MBX_SUCCESS) {
157  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
158  "0439 Adapter failed to init, mbxCmd x%x "
159  "READ_REV, mbxStatus x%x\n",
160  mb->mbxCommand, mb->mbxStatus);
161  mempool_free( pmb, phba->mbox_mem_pool);
162  return -ERESTART;
163  }
164 
165 
166  /*
167  * The value of rr must be 1 since the driver set the cv field to 1.
168  * This setting requires the FW to set all revision fields.
169  */
170  if (mb->un.varRdRev.rr == 0) {
171  vp->rev.rBit = 0;
172  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
173  "0440 Adapter failed to init, READ_REV has "
174  "missing revision information.\n");
175  mempool_free(pmb, phba->mbox_mem_pool);
176  return -ERESTART;
177  }
178 
179  if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
180  mempool_free(pmb, phba->mbox_mem_pool);
181  return -EINVAL;
182  }
183 
184  /* Save information as VPD data */
185  vp->rev.rBit = 1;
186  memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
187  vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
188  memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
189  vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
190  memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
191  vp->rev.biuRev = mb->un.varRdRev.biuRev;
192  vp->rev.smRev = mb->un.varRdRev.smRev;
193  vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
194  vp->rev.endecRev = mb->un.varRdRev.endecRev;
195  vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
196  vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
197  vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
198  vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
199  vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
200  vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
201 
202  /* If the sli feature level is less then 9, we must
203  * tear down all RPIs and VPIs on link down if NPIV
204  * is enabled.
205  */
206  if (vp->rev.feaLevelHigh < 9)
207  phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
208 
209  if (lpfc_is_LC_HBA(phba->pcidev->device))
210  memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
211  sizeof (phba->RandomData));
212 
213  /* Get adapter VPD information */
214  lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
215  if (!lpfc_vpd_data)
216  goto out_free_mbox;
217  do {
218  lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
219  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
220 
221  if (rc != MBX_SUCCESS) {
222  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
223  "0441 VPD not present on adapter, "
224  "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
225  mb->mbxCommand, mb->mbxStatus);
226  mb->un.varDmp.word_cnt = 0;
227  }
228  /* dump mem may return a zero when finished or we got a
229  * mailbox error, either way we are done.
230  */
231  if (mb->un.varDmp.word_cnt == 0)
232  break;
233  if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
234  mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
235  lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
236  lpfc_vpd_data + offset,
237  mb->un.varDmp.word_cnt);
238  offset += mb->un.varDmp.word_cnt;
239  } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
240  lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
241 
242  kfree(lpfc_vpd_data);
243 out_free_mbox:
244  mempool_free(pmb, phba->mbox_mem_pool);
245  return 0;
246 }
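
The function above repeats one idiom for every command it sends: allocate a mailbox from the HBA's mempool, build the command into it, issue it synchronously with MBX_POLL, and free it on every exit path. A minimal sketch of that idiom using only calls that already appear above (example_read_rev is a hypothetical helper and the snippet assumes the driver's own headers):

static int example_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);	/* build the READ_REV command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}
	/* consume pmb->u.mb here, then release the mailbox */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}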
247 
258 static void
259 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
260 {
261  if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
262  phba->temp_sensor_support = 1;
263  else
264  phba->temp_sensor_support = 0;
265  mempool_free(pmboxq, phba->mbox_mem_pool);
266  return;
267 }
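
This completion handler is the other half of the driver's asynchronous mailbox idiom: the issuer sets pmb->mbox_cmpl, submits with MBX_NOWAIT, and the completion routine inspects mbxStatus and frees the mailbox. A sketch of the issuing side, mirroring the ASYNCEVT_ENABLE code later in this file (example_enable_async_events is a hypothetical helper):

static int example_enable_async_events(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;	/* runs at completion */
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		mempool_free(pmb, phba->mbox_mem_pool);	/* never queued */
		return -EIO;
	}
	return 0;	/* mailbox freed later by the completion handler */
}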
268 
279 static void
280 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
281 {
282  struct prog_id *prg;
283  uint32_t prog_id_word;
284  char dist = ' ';
285  /* character array used for decoding dist type. */
286  char dist_char[] = "nabx";
287 
288  if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
289  mempool_free(pmboxq, phba->mbox_mem_pool);
290  return;
291  }
292 
293  prg = (struct prog_id *) &prog_id_word;
294 
295  /* word 7 contain option rom version */
296  prog_id_word = pmboxq->u.mb.un.varWords[7];
297 
298  /* Decode the Option rom version word to a readable string */
299  if (prg->dist < 4)
300  dist = dist_char[prg->dist];
301 
302  if ((prg->dist == 3) && (prg->num == 0))
303  sprintf(phba->OptionROMVersion, "%d.%d%d",
304  prg->ver, prg->rev, prg->lev);
305  else
306  sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
307  prg->ver, prg->rev, prg->lev,
308  dist, prg->num);
309  mempool_free(pmboxq, phba->mbox_mem_pool);
310  return;
311 }
312 
322 void
323 lpfc_update_vport_wwn(struct lpfc_vport *vport)
324 {
325  /* If the soft name exists then update it using the service params */
326  if (vport->phba->cfg_soft_wwnn)
327  u64_to_wwn(vport->phba->cfg_soft_wwnn,
328  vport->fc_sparam.nodeName.u.wwn);
329  if (vport->phba->cfg_soft_wwpn)
330  u64_to_wwn(vport->phba->cfg_soft_wwpn,
331  vport->fc_sparam.portName.u.wwn);
332 
333  /*
334  * If the name is empty or there exists a soft name
335  * then copy the service params name, otherwise use the fc name
336  */
337  if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
338  memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
339  sizeof(struct lpfc_name));
340  else
341  memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
342  sizeof(struct lpfc_name));
343 
344  if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
345  memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
346  sizeof(struct lpfc_name));
347  else
348  memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
349  sizeof(struct lpfc_name));
350 }
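
The u64_to_wwn()/wwn_to_u64() helpers used above come from the FC transport and convert between a 64-bit WWN and its big-endian 8-byte wire form. A tiny round-trip illustration (the literal WWN value is made up):

	uint8_t wwn[8];
	uint64_t u;

	u64_to_wwn(0x20000090fa123456ULL, wwn);	/* big-endian byte array */
	u = wwn_to_u64(wwn);			/* round-trips to the u64 */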
351 
365 int
366 lpfc_config_port_post(struct lpfc_hba *phba)
367 {
368  struct lpfc_vport *vport = phba->pport;
369  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
370  LPFC_MBOXQ_t *pmb;
371  MAILBOX_t *mb;
372  struct lpfc_dmabuf *mp;
373  struct lpfc_sli *psli = &phba->sli;
374  uint32_t status, timeout;
375  int i, j;
376  int rc;
377 
378  spin_lock_irq(&phba->hbalock);
379  /*
380  * If the Config port completed correctly the HBA is not
381  * over heated any more.
382  */
383  if (phba->over_temp_state == HBA_OVER_TEMP)
384  phba->over_temp_state = HBA_NORMAL_TEMP;
385  spin_unlock_irq(&phba->hbalock);
386 
387  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
388  if (!pmb) {
389  phba->link_state = LPFC_HBA_ERROR;
390  return -ENOMEM;
391  }
392  mb = &pmb->u.mb;
393 
394  /* Get login parameters for NID. */
395  rc = lpfc_read_sparam(phba, pmb, 0);
396  if (rc) {
397  mempool_free(pmb, phba->mbox_mem_pool);
398  return -ENOMEM;
399  }
400 
401  pmb->vport = vport;
402  if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
403  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
404  "0448 Adapter failed init, mbxCmd x%x "
405  "READ_SPARM mbxStatus x%x\n",
406  mb->mbxCommand, mb->mbxStatus);
407  phba->link_state = LPFC_HBA_ERROR;
408  mp = (struct lpfc_dmabuf *) pmb->context1;
409  mempool_free(pmb, phba->mbox_mem_pool);
410  lpfc_mbuf_free(phba, mp->virt, mp->phys);
411  kfree(mp);
412  return -EIO;
413  }
414 
415  mp = (struct lpfc_dmabuf *) pmb->context1;
416 
417  memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
418  lpfc_mbuf_free(phba, mp->virt, mp->phys);
419  kfree(mp);
420  pmb->context1 = NULL;
421  lpfc_update_vport_wwn(vport);
422 
423  /* Update the fc_host data structures with new wwn. */
424  fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
425  fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
426  fc_host_max_npiv_vports(shost) = phba->max_vpi;
427 
428  /* If no serial number in VPD data, use low 6 bytes of WWNN */
429  /* This should be consolidated into parse_vpd ? - mr */
430  if (phba->SerialNumber[0] == 0) {
431  uint8_t *outptr;
432 
433  outptr = &vport->fc_nodename.u.s.IEEE[0];
434  for (i = 0; i < 12; i++) {
435  status = *outptr++;
436  j = ((status & 0xf0) >> 4);
437  if (j <= 9)
438  phba->SerialNumber[i] =
439  (char)((uint8_t) 0x30 + (uint8_t) j);
440  else
441  phba->SerialNumber[i] =
442  (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
443  i++;
444  j = (status & 0xf);
445  if (j <= 9)
446  phba->SerialNumber[i] =
447  (char)((uint8_t) 0x30 + (uint8_t) j);
448  else
449  phba->SerialNumber[i] =
450  (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
451  }
452  }
453 
454  lpfc_read_config(phba, pmb);
455  pmb->vport = vport;
456  if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
457  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
458  "0453 Adapter failed to init, mbxCmd x%x "
459  "READ_CONFIG, mbxStatus x%x\n",
460  mb->mbxCommand, mb->mbxStatus);
461  phba->link_state = LPFC_HBA_ERROR;
462  mempool_free( pmb, phba->mbox_mem_pool);
463  return -EIO;
464  }
465 
466  /* Check if the port is disabled */
467  lpfc_sli_read_link_ste(phba);
468 
469  /* Reset the DFT_HBA_Q_DEPTH to the max xri */
470  if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
471  phba->cfg_hba_queue_depth =
472  (mb->un.varRdConfig.max_xri + 1) -
473  lpfc_sli4_get_els_iocb_cnt(phba);
474 
475  phba->lmt = mb->un.varRdConfig.lmt;
476 
477  /* Get the default values for Model Name and Description */
478  lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
479 
480  phba->link_state = LPFC_LINK_DOWN;
481 
482  /* Only process IOCBs on ELS ring till hba_state is READY */
483  if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
484  psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
485  if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
486  psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
487  if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
488  psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
489 
490  /* Post receive buffers for desired rings */
491  if (phba->sli_rev != 3)
492  lpfc_post_rcv_buf(phba);
493 
494  /*
495  * Configure HBA MSI-X attention conditions to messages if MSI-X mode
496  */
497  if (phba->intr_type == MSIX) {
498  rc = lpfc_config_msi(phba, pmb);
499  if (rc) {
500  mempool_free(pmb, phba->mbox_mem_pool);
501  return -EIO;
502  }
503  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
504  if (rc != MBX_SUCCESS) {
505  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
506  "0352 Config MSI mailbox command "
507  "failed, mbxCmd x%x, mbxStatus x%x\n",
508  pmb->u.mb.mbxCommand,
509  pmb->u.mb.mbxStatus);
510  mempool_free(pmb, phba->mbox_mem_pool);
511  return -EIO;
512  }
513  }
514 
515  spin_lock_irq(&phba->hbalock);
516  /* Initialize ERATT handling flag */
517  phba->hba_flag &= ~HBA_ERATT_HANDLED;
518 
519  /* Enable appropriate host interrupts */
520  if (lpfc_readl(phba->HCregaddr, &status)) {
521  spin_unlock_irq(&phba->hbalock);
522  return -EIO;
523  }
524  status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
525  if (psli->num_rings > 0)
526  status |= HC_R0INT_ENA;
527  if (psli->num_rings > 1)
528  status |= HC_R1INT_ENA;
529  if (psli->num_rings > 2)
530  status |= HC_R2INT_ENA;
531  if (psli->num_rings > 3)
532  status |= HC_R3INT_ENA;
533 
534  if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
535  (phba->cfg_poll & DISABLE_FCP_RING_INT))
536  status &= ~(HC_R0INT_ENA);
537 
538  writel(status, phba->HCregaddr);
539  readl(phba->HCregaddr); /* flush */
540  spin_unlock_irq(&phba->hbalock);
541 
542  /* Set up ring-0 (ELS) timer */
543  timeout = phba->fc_ratov * 2;
544  mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
545  /* Set up heart beat (HB) timer */
546  mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
547  phba->hb_outstanding = 0;
548  phba->last_completion_time = jiffies;
549  /* Set up error attention (ERATT) polling timer */
550  mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
551 
552  if (phba->hba_flag & LINK_DISABLED) {
553  lpfc_printf_log(phba,
554  KERN_ERR, LOG_INIT,
555  "2598 Adapter Link is disabled.\n");
556  lpfc_down_link(phba, pmb);
557  pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
558  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
559  if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
560  lpfc_printf_log(phba,
561  KERN_ERR, LOG_INIT,
562  "2599 Adapter failed to issue DOWN_LINK"
563  " mbox command rc 0x%x\n", rc);
564 
565  mempool_free(pmb, phba->mbox_mem_pool);
566  return -EIO;
567  }
568  } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
569  mempool_free(pmb, phba->mbox_mem_pool);
570  rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
571  if (rc)
572  return rc;
573  }
574  /* MBOX buffer will be freed in mbox compl */
575  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
576  if (!pmb) {
577  phba->link_state = LPFC_HBA_ERROR;
578  return -ENOMEM;
579  }
580 
581  lpfc_config_async(phba, pmb, LPFC_ELS_RING);
582  pmb->mbox_cmpl = lpfc_config_async_cmpl;
583  pmb->vport = phba->pport;
584  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
585 
586  if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
587  lpfc_printf_log(phba,
588  KERN_ERR,
589  LOG_INIT,
590  "0456 Adapter failed to issue "
591  "ASYNCEVT_ENABLE mbox status x%x\n",
592  rc);
593  mempool_free(pmb, phba->mbox_mem_pool);
594  }
595 
596  /* Get Option rom version */
597  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
598  if (!pmb) {
599  phba->link_state = LPFC_HBA_ERROR;
600  return -ENOMEM;
601  }
602 
603  lpfc_dump_wakeup_param(phba, pmb);
604  pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
605  pmb->vport = phba->pport;
606  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
607 
608  if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
609  lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
610  "to get Option ROM version status x%x\n", rc);
611  mempool_free(pmb, phba->mbox_mem_pool);
612  }
613 
614  return 0;
615 }
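
The serial-number fallback above converts each byte of the WWNN's IEEE field into two lowercase hex characters by hand: 0x30 + j for nibbles 0-9 and 0x61 + (j - 10) for a-f. A standalone illustration of the same mapping (hex_from_nibbles is a hypothetical demo, not driver code):

#include <stdint.h>
#include <stdio.h>

static void hex_from_nibbles(const uint8_t *in, int n, char *out)
{
	int i, j, k = 0;

	for (i = 0; i < n; i++) {
		j = (in[i] & 0xf0) >> 4;	/* high nibble first */
		out[k++] = (j <= 9) ? '0' + j : 'a' + (j - 10);
		j = in[i] & 0xf;		/* then low nibble */
		out[k++] = (j <= 9) ? '0' + j : 'a' + (j - 10);
	}
	out[k] = '\0';
}

int main(void)
{
	uint8_t ieee[6] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	char sn[13];

	hex_from_nibbles(ieee, 6, sn);
	printf("%s\n", sn);	/* prints 0090fa123456 */
	return 0;
}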
616 
631 int
632 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
633 {
634  return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
635 }
636 
652 int
653 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
654  uint32_t flag)
655 {
656  struct lpfc_vport *vport = phba->pport;
657  LPFC_MBOXQ_t *pmb;
658  MAILBOX_t *mb;
659  int rc;
660 
661  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
662  if (!pmb) {
663  phba->link_state = LPFC_HBA_ERROR;
664  return -ENOMEM;
665  }
666  mb = &pmb->u.mb;
667  pmb->vport = vport;
668 
669  if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
670  ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
671  !(phba->lmt & LMT_1Gb)) ||
672  ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
673  !(phba->lmt & LMT_2Gb)) ||
674  ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
675  !(phba->lmt & LMT_4Gb)) ||
676  ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
677  !(phba->lmt & LMT_8Gb)) ||
678  ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
679  !(phba->lmt & LMT_10Gb)) ||
680  ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
681  !(phba->lmt & LMT_16Gb))) {
682  /* Reset link speed to auto */
683  lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
684  "1302 Invalid speed for this board:%d "
685  "Reset link speed to auto.\n",
686  phba->cfg_link_speed);
687  phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
688  }
689  lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
690  pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
691  if (phba->sli_rev < LPFC_SLI_REV4)
692  lpfc_set_loopback_flag(phba);
693  rc = lpfc_sli_issue_mbox(phba, pmb, flag);
694  if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
695  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
696  "0498 Adapter failed to init, mbxCmd x%x "
697  "INIT_LINK, mbxStatus x%x\n",
698  mb->mbxCommand, mb->mbxStatus);
699  if (phba->sli_rev <= LPFC_SLI_REV3) {
700  /* Clear all interrupt enable conditions */
701  writel(0, phba->HCregaddr);
702  readl(phba->HCregaddr); /* flush */
703  /* Clear all pending interrupts */
704  writel(0xffffffff, phba->HAregaddr);
705  readl(phba->HAregaddr); /* flush */
706  }
707  phba->link_state = LPFC_HBA_ERROR;
708  if (rc != MBX_BUSY || flag == MBX_POLL)
709  mempool_free(pmb, phba->mbox_mem_pool);
710  return -EIO;
711  }
713  if (flag == MBX_POLL)
714  mempool_free(pmb, phba->mbox_mem_pool);
715 
716  return 0;
717 }
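
The lmt field checked above is a bitmask of the link speeds the board advertises; a user-configured speed is rejected unless its LMT bit is set. The same test written as a hypothetical predicate (example_speed_supported is not part of the driver):

static int example_speed_supported(struct lpfc_hba *phba, int speed)
{
	switch (speed) {
	case LPFC_USER_LINK_SPEED_1G:	return !!(phba->lmt & LMT_1Gb);
	case LPFC_USER_LINK_SPEED_2G:	return !!(phba->lmt & LMT_2Gb);
	case LPFC_USER_LINK_SPEED_4G:	return !!(phba->lmt & LMT_4Gb);
	case LPFC_USER_LINK_SPEED_8G:	return !!(phba->lmt & LMT_8Gb);
	case LPFC_USER_LINK_SPEED_10G:	return !!(phba->lmt & LMT_10Gb);
	case LPFC_USER_LINK_SPEED_16G:	return !!(phba->lmt & LMT_16Gb);
	case LPFC_USER_LINK_SPEED_AUTO:	return 1;	/* always valid */
	default:			return 0;
	}
}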
718 
732 int
733 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
734 {
735  LPFC_MBOXQ_t *pmb;
736  int rc;
737 
738  pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
739  if (!pmb) {
740  phba->link_state = LPFC_HBA_ERROR;
741  return -ENOMEM;
742  }
743 
744  lpfc_printf_log(phba,
745  KERN_ERR, LOG_INIT,
746  "0491 Adapter Link is disabled.\n");
747  lpfc_down_link(phba, pmb);
748  pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
749  rc = lpfc_sli_issue_mbox(phba, pmb, flag);
750  if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
751  lpfc_printf_log(phba,
752  KERN_ERR, LOG_INIT,
753  "2522 Adapter failed to issue DOWN_LINK"
754  " mbox command rc 0x%x\n", rc);
755 
756  mempool_free(pmb, phba->mbox_mem_pool);
757  return -EIO;
758  }
759  if (flag == MBX_POLL)
760  mempool_free(pmb, phba->mbox_mem_pool);
761 
762  return 0;
763 }
764 
776 int
777 lpfc_hba_down_prep(struct lpfc_hba *phba)
778 {
779  struct lpfc_vport **vports;
780  int i;
781 
782  if (phba->sli_rev <= LPFC_SLI_REV3) {
783  /* Disable interrupts */
784  writel(0, phba->HCregaddr);
785  readl(phba->HCregaddr); /* flush */
786  }
787 
788  if (phba->pport->load_flag & FC_UNLOADING)
789  lpfc_cleanup_discovery_resources(phba->pport);
790  else {
791  vports = lpfc_create_vport_work_array(phba);
792  if (vports != NULL)
793  for (i = 0; i <= phba->max_vports &&
794  vports[i] != NULL; i++)
795  lpfc_cleanup_discovery_resources(vports[i]);
796  lpfc_destroy_vport_work_array(phba, vports);
797  }
798  return 0;
799 }
800 
812 static int
813 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
814 {
815  struct lpfc_sli *psli = &phba->sli;
816  struct lpfc_sli_ring *pring;
817  struct lpfc_dmabuf *mp, *next_mp;
818  LIST_HEAD(completions);
819  int i;
820 
821  if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
822  lpfc_sli_hbqbuf_free_all(phba);
823  else {
824  /* Cleanup preposted buffers on the ELS ring */
825  pring = &psli->ring[LPFC_ELS_RING];
826  list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
827  list_del(&mp->list);
828  pring->postbufq_cnt--;
829  lpfc_mbuf_free(phba, mp->virt, mp->phys);
830  kfree(mp);
831  }
832  }
833 
834  spin_lock_irq(&phba->hbalock);
835  for (i = 0; i < psli->num_rings; i++) {
836  pring = &psli->ring[i];
837 
838  /* At this point in time the HBA is either reset or DOA. Either
839  * way, nothing should be on txcmplq as it will NEVER complete.
840  */
841  list_splice_init(&pring->txcmplq, &completions);
842  pring->txcmplq_cnt = 0;
843  spin_unlock_irq(&phba->hbalock);
844 
845  /* Cancel all the IOCBs from the completions list */
846  lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
847  IOERR_SLI_DOWN);
848 
849  lpfc_sli_abort_iocb_ring(phba, pring);
850  spin_lock_irq(&phba->hbalock);
851  }
852  spin_unlock_irq(&phba->hbalock);
853 
854  return 0;
855 }
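
The per-ring loop above shows the standard way to drain a driver-private list without holding a spinlock across the expensive per-entry work: splice the whole list to a private head under the lock, drop the lock, then walk the private copy. The pattern reduced to its essentials (a fragment, assuming the surrounding function's locals):

	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txcmplq, &completions);	/* txcmplq now empty */
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* no lock held: safe to do per-iocb completion work */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);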
856 
868 static int
869 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
870 {
871  struct lpfc_scsi_buf *psb, *psb_next;
872  LIST_HEAD(aborts);
873  int ret;
874  unsigned long iflag = 0;
875  struct lpfc_sglq *sglq_entry = NULL;
876 
877  ret = lpfc_hba_down_post_s3(phba);
878  if (ret)
879  return ret;
880  /* At this point in time the HBA is either reset or DOA. Either
881  * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
882  * on the lpfc_sgl_list so that it can either be freed if the
883  * driver is unloading or reposted if the driver is restarting
884  * the port.
885  */
886  spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
887  /* scsl_buf_list */
888  /* abts_sgl_list_lock required because worker thread uses this
889  * list.
890  */
891  spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
892  list_for_each_entry(sglq_entry,
893  &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
894  sglq_entry->state = SGL_FREED;
895 
896  list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
897  &phba->sli4_hba.lpfc_sgl_list);
898  spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
899  /* abts_scsi_buf_list_lock required because worker thread uses this
900  * list.
901  */
902  spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
903  list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
904  &aborts);
905  spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
906  spin_unlock_irq(&phba->hbalock);
907 
908  list_for_each_entry_safe(psb, psb_next, &aborts, list) {
909  psb->pCmd = NULL;
910  psb->status = IOSTAT_SUCCESS;
911  }
912  spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
913  list_splice(&aborts, &phba->lpfc_scsi_buf_list);
914  spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
915  return 0;
916 }
917 
929 int
930 lpfc_hba_down_post(struct lpfc_hba *phba)
931 {
932  return (*phba->lpfc_hba_down_post)(phba);
933 }
934 
947 static void
948 lpfc_hb_timeout(unsigned long ptr)
949 {
950  struct lpfc_hba *phba;
951  uint32_t tmo_posted;
952  unsigned long iflag;
953 
954  phba = (struct lpfc_hba *)ptr;
955 
956  /* Check for heart beat timeout conditions */
957  spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
958  tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
959  if (!tmo_posted)
960  phba->pport->work_port_events |= WORKER_HB_TMO;
961  spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
962 
963  /* Tell the worker thread there is work to do */
964  if (!tmo_posted)
965  lpfc_worker_wake_up(phba);
966  return;
967 }
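
lpfc_hb_timeout() illustrates the driver's timer discipline: the callback runs in softirq context, so it only sets a work flag under the port lock and wakes the worker thread; the heavy lifting happens later in the worker's heartbeat handler. A sketch of how such a timer is typically wired up with the 3.7-era API (the setup site is assumed, it is not part of this file):

	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	/* later, arm or re-arm it: */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);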
968 
981 static void
982 lpfc_rrq_timeout(unsigned long ptr)
983 {
984  struct lpfc_hba *phba;
985  unsigned long iflag;
986 
987  phba = (struct lpfc_hba *)ptr;
988  spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
989  phba->hba_flag |= HBA_RRQ_ACTIVE;
990  spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
991  lpfc_worker_wake_up(phba);
992 }
993 
1010 static void
1011 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1012 {
1013  unsigned long drvr_flag;
1014 
1015  spin_lock_irqsave(&phba->hbalock, drvr_flag);
1016  phba->hb_outstanding = 0;
1017  spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1018 
1019  /* Check and reset heart-beat timer if necessary */
1020  mempool_free(pmboxq, phba->mbox_mem_pool);
1021  if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1022  !(phba->link_state == LPFC_HBA_ERROR) &&
1023  !(phba->pport->load_flag & FC_UNLOADING))
1024  mod_timer(&phba->hb_tmofunc,
1025  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1026  return;
1027 }
1028 
1045 void
1046 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1047 {
1048  struct lpfc_vport **vports;
1049  LPFC_MBOXQ_t *pmboxq;
1050  struct lpfc_dmabuf *buf_ptr;
1051  int retval, i;
1052  struct lpfc_sli *psli = &phba->sli;
1053  LIST_HEAD(completions);
1054 
1055  vports = lpfc_create_vport_work_array(phba);
1056  if (vports != NULL)
1057  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1058  lpfc_rcv_seq_check_edtov(vports[i]);
1059  lpfc_destroy_vport_work_array(phba, vports);
1060 
1061  if ((phba->link_state == LPFC_HBA_ERROR) ||
1062  (phba->pport->load_flag & FC_UNLOADING) ||
1063  (phba->pport->fc_flag & FC_OFFLINE_MODE))
1064  return;
1065 
1066  spin_lock_irq(&phba->pport->work_port_lock);
1067 
1068  if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1069  jiffies)) {
1070  spin_unlock_irq(&phba->pport->work_port_lock);
1071  if (!phba->hb_outstanding)
1072  mod_timer(&phba->hb_tmofunc,
1073  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1074  else
1075  mod_timer(&phba->hb_tmofunc,
1076  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1077  return;
1078  }
1079  spin_unlock_irq(&phba->pport->work_port_lock);
1080 
1081  if (phba->elsbuf_cnt &&
1082  (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1083  spin_lock_irq(&phba->hbalock);
1084  list_splice_init(&phba->elsbuf, &completions);
1085  phba->elsbuf_cnt = 0;
1086  phba->elsbuf_prev_cnt = 0;
1087  spin_unlock_irq(&phba->hbalock);
1088 
1089  while (!list_empty(&completions)) {
1090  list_remove_head(&completions, buf_ptr,
1091  struct lpfc_dmabuf, list);
1092  lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1093  kfree(buf_ptr);
1094  }
1095  }
1096  phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1097 
1098  /* If there is no heart beat outstanding, issue a heartbeat command */
1099  if (phba->cfg_enable_hba_heartbeat) {
1100  if (!phba->hb_outstanding) {
1101  if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1102  (list_empty(&psli->mboxq))) {
1103  pmboxq = mempool_alloc(phba->mbox_mem_pool,
1104  GFP_KERNEL);
1105  if (!pmboxq) {
1106  mod_timer(&phba->hb_tmofunc,
1107  jiffies +
1108  HZ * LPFC_HB_MBOX_INTERVAL);
1109  return;
1110  }
1111 
1112  lpfc_heart_beat(phba, pmboxq);
1113  pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1114  pmboxq->vport = phba->pport;
1115  retval = lpfc_sli_issue_mbox(phba, pmboxq,
1116  MBX_NOWAIT);
1117 
1118  if (retval != MBX_BUSY &&
1119  retval != MBX_SUCCESS) {
1120  mempool_free(pmboxq,
1121  phba->mbox_mem_pool);
1122  mod_timer(&phba->hb_tmofunc,
1123  jiffies +
1124  HZ * LPFC_HB_MBOX_INTERVAL);
1125  return;
1126  }
1127  phba->skipped_hb = 0;
1128  phba->hb_outstanding = 1;
1129  } else if (time_before_eq(phba->last_completion_time,
1130  phba->skipped_hb)) {
1131  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1132  "2857 Last completion time not "
1133  " updated in %d ms\n",
1134  jiffies_to_msecs(jiffies
1135  - phba->last_completion_time));
1136  } else
1137  phba->skipped_hb = jiffies;
1138 
1139  mod_timer(&phba->hb_tmofunc,
1140  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1141  return;
1142  } else {
1143  /*
1144  * If heart beat timeout called with hb_outstanding set
1145  * we need to give the hb mailbox cmd a chance to
1146  * complete or TMO.
1147  */
1148  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1149  "0459 Adapter heartbeat still out"
1150  "standing:last compl time was %d ms.\n",
1151  jiffies_to_msecs(jiffies
1152  - phba->last_completion_time));
1153  mod_timer(&phba->hb_tmofunc,
1154  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1155  }
1156  }
1157 }
1158 
1166 static void
1167 lpfc_offline_eratt(struct lpfc_hba *phba)
1168 {
1169  struct lpfc_sli *psli = &phba->sli;
1170 
1171  spin_lock_irq(&phba->hbalock);
1172  psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1173  spin_unlock_irq(&phba->hbalock);
1174  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1175 
1176  lpfc_offline(phba);
1177  lpfc_reset_barrier(phba);
1178  spin_lock_irq(&phba->hbalock);
1179  lpfc_sli_brdreset(phba);
1180  spin_unlock_irq(&phba->hbalock);
1181  lpfc_hba_down_post(phba);
1182  lpfc_sli_brdready(phba, HS_MBRDY);
1183  lpfc_unblock_mgmt_io(phba);
1184  phba->link_state = LPFC_HBA_ERROR;
1185  return;
1186 }
1187 
1195 static void
1196 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1197 {
1198  lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1199  lpfc_offline(phba);
1200  lpfc_sli4_brdreset(phba);
1201  lpfc_hba_down_post(phba);
1202  lpfc_sli4_post_status_check(phba);
1203  lpfc_unblock_mgmt_io(phba);
1204  phba->link_state = LPFC_HBA_ERROR;
1205 }
1206 
1216 static void
1217 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1218 {
1219  uint32_t old_host_status = phba->work_hs;
1220  struct lpfc_sli_ring *pring;
1221  struct lpfc_sli *psli = &phba->sli;
1222 
1223  /* If the pci channel is offline, ignore possible errors,
1224  * since we cannot communicate with the pci card anyway.
1225  */
1226  if (pci_channel_offline(phba->pcidev)) {
1227  spin_lock_irq(&phba->hbalock);
1228  phba->hba_flag &= ~DEFER_ERATT;
1229  spin_unlock_irq(&phba->hbalock);
1230  return;
1231  }
1232 
1233  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1234  "0479 Deferred Adapter Hardware Error "
1235  "Data: x%x x%x x%x\n",
1236  phba->work_hs,
1237  phba->work_status[0], phba->work_status[1]);
1238 
1239  spin_lock_irq(&phba->hbalock);
1240  psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1241  spin_unlock_irq(&phba->hbalock);
1242 
1243 
1244  /*
1245  * Firmware stops when it triggered erratt. That could cause the I/Os
1246  * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1247  * SCSI layer retry it after re-establishing link.
1248  */
1249  pring = &psli->ring[psli->fcp_ring];
1250  lpfc_sli_abort_iocb_ring(phba, pring);
1251 
1252  /*
1253  * There was a firmware error. Take the hba offline and then
1254  * attempt to restart it.
1255  */
1256  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1257  lpfc_offline(phba);
1258 
1259  /* Wait for the ER1 bit to clear.*/
1260  while (phba->work_hs & HS_FFER1) {
1261  msleep(100);
1262  if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1263  phba->work_hs = UNPLUG_ERR ;
1264  break;
1265  }
1266  /* If driver is unloading let the worker thread continue */
1267  if (phba->pport->load_flag & FC_UNLOADING) {
1268  phba->work_hs = 0;
1269  break;
1270  }
1271  }
1272 
1273  /*
1274  * This is to protect against a race condition in which the
1275  * first write to the host attention register clears the
1276  * host status register.
1277  */
1278  if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1279  phba->work_hs = old_host_status & ~HS_FFER1;
1280 
1281  spin_lock_irq(&phba->hbalock);
1282  phba->hba_flag &= ~DEFER_ERATT;
1283  spin_unlock_irq(&phba->hbalock);
1284  phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1285  phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1286 }
1287 
1288 static void
1289 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1290 {
1291  struct lpfc_board_event_header board_event;
1292  struct Scsi_Host *shost;
1293 
1294  board_event.event_type = FC_REG_BOARD_EVENT;
1295  board_event.subcategory = LPFC_EVENT_PORTINTERR;
1296  shost = lpfc_shost_from_vport(phba->pport);
1297  fc_host_post_vendor_event(shost, fc_get_event_number(),
1298  sizeof(board_event),
1299  (char *) &board_event,
1300  LPFC_NL_VENDOR_ID);
1301 }
1302 
1313 static void
1314 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1315 {
1316  struct lpfc_vport *vport = phba->pport;
1317  struct lpfc_sli *psli = &phba->sli;
1318  struct lpfc_sli_ring *pring;
1319  uint32_t event_data;
1320  unsigned long temperature;
1321  struct temp_event temp_event_data;
1322  struct Scsi_Host *shost;
1323 
1324  /* If the pci channel is offline, ignore possible errors,
1325  * since we cannot communicate with the pci card anyway.
1326  */
1327  if (pci_channel_offline(phba->pcidev)) {
1328  spin_lock_irq(&phba->hbalock);
1329  phba->hba_flag &= ~DEFER_ERATT;
1330  spin_unlock_irq(&phba->hbalock);
1331  return;
1332  }
1333 
1334  /* If resets are disabled then leave the HBA alone and return */
1335  if (!phba->cfg_enable_hba_reset)
1336  return;
1337 
1338  /* Send an internal error event to mgmt application */
1339  lpfc_board_errevt_to_mgmt(phba);
1340 
1341  if (phba->hba_flag & DEFER_ERATT)
1342  lpfc_handle_deferred_eratt(phba);
1343 
1344  if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1345  if (phba->work_hs & HS_FFER6)
1346  /* Re-establishing Link */
1347  lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1348  "1301 Re-establishing Link "
1349  "Data: x%x x%x x%x\n",
1350  phba->work_hs, phba->work_status[0],
1351  phba->work_status[1]);
1352  if (phba->work_hs & HS_FFER8)
1353  /* Device Zeroization */
1354  lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1355  "2861 Host Authentication device "
1356  "zeroization Data:x%x x%x x%x\n",
1357  phba->work_hs, phba->work_status[0],
1358  phba->work_status[1]);
1359 
1360  spin_lock_irq(&phba->hbalock);
1361  psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1362  spin_unlock_irq(&phba->hbalock);
1363 
1364  /*
1365  * Firmware stops when it triggered erratt with HS_FFER6.
1366  * That could cause the I/Os dropped by the firmware.
1367  * Error iocb (I/O) on txcmplq and let the SCSI layer
1368  * retry it after re-establishing link.
1369  */
1370  pring = &psli->ring[psli->fcp_ring];
1371  lpfc_sli_abort_iocb_ring(phba, pring);
1372 
1373  /*
1374  * There was a firmware error. Take the hba offline and then
1375  * attempt to restart it.
1376  */
1377  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1378  lpfc_offline(phba);
1379  lpfc_sli_brdrestart(phba);
1380  if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1381  lpfc_unblock_mgmt_io(phba);
1382  return;
1383  }
1384  lpfc_unblock_mgmt_io(phba);
1385  } else if (phba->work_hs & HS_CRIT_TEMP) {
1386  temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1387  temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1388  temp_event_data.event_code = LPFC_CRIT_TEMP;
1389  temp_event_data.data = (uint32_t)temperature;
1390 
1391  lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
1392  "0406 Adapter maximum temperature exceeded "
1393  "(%ld), taking this port offline "
1394  "Data: x%x x%x x%x\n",
1395  temperature, phba->work_hs,
1396  phba->work_status[0], phba->work_status[1]);
1397 
1398  shost = lpfc_shost_from_vport(phba->pport);
1399  fc_host_post_vendor_event(shost, fc_get_event_number(),
1400  sizeof(temp_event_data),
1401  (char *) &temp_event_data,
1402  SCSI_NL_VID_TYPE_PCI
1403  | PCI_VENDOR_ID_EMULEX);
1404 
1405  spin_lock_irq(&phba->hbalock);
1406  phba->over_temp_state = HBA_OVER_TEMP;
1407  spin_unlock_irq(&phba->hbalock);
1408  lpfc_offline_eratt(phba);
1409 
1410  } else {
1411  /* The if clause above forces this code path when the status
1412  * failure is a value other than FFER6. Do not call the offline
1413  * twice. This is the adapter hardware error path.
1414  */
1415  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1416  "0457 Adapter Hardware Error "
1417  "Data: x%x x%x x%x\n",
1418  phba->work_hs,
1419  phba->work_status[0], phba->work_status[1]);
1420 
1421  event_data = FC_REG_DUMP_EVENT;
1422  shost = lpfc_shost_from_vport(vport);
1423  fc_host_post_vendor_event(shost, fc_get_event_number(),
1424  sizeof(event_data), (char *) &event_data,
1425  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1426 
1427  lpfc_offline_eratt(phba);
1428  }
1429  return;
1430 }
1431 
1443 static int
1444 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
1445 {
1446  int rc;
1447  uint32_t intr_mode;
1448 
1449  /*
1450  * On error status condition, driver need to wait for port
1451  * ready before performing reset.
1452  */
1453  rc = lpfc_sli4_pdev_status_reg_wait(phba);
1454  if (!rc) {
1455  /* need reset: attempt for port recovery */
1456  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1457  "2887 Reset Needed: Attempting Port "
1458  "Recovery...\n");
1459  lpfc_offline_prep(phba, mbx_action);
1460  lpfc_offline(phba);
1461  /* release interrupt for possible resource change */
1462  lpfc_sli4_disable_intr(phba);
1463  lpfc_sli_brdrestart(phba);
1464  /* request and enable interrupt */
1465  intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1466  if (intr_mode == LPFC_INTR_ERROR) {
1467  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1468  "3175 Failed to enable interrupt\n");
1469  return -EIO;
1470  } else {
1471  phba->intr_mode = intr_mode;
1472  }
1473  rc = lpfc_online(phba);
1474  if (rc == 0)
1475  lpfc_unblock_mgmt_io(phba);
1476  }
1477  return rc;
1478 }
1479 
1487 static void
1488 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1489 {
1490  struct lpfc_vport *vport = phba->pport;
1491  uint32_t event_data;
1492  struct Scsi_Host *shost;
1493  uint32_t if_type;
1494  struct lpfc_register portstat_reg = {0};
1495  uint32_t reg_err1, reg_err2;
1496  uint32_t uerrlo_reg, uemasklo_reg;
1497  uint32_t pci_rd_rc1, pci_rd_rc2;
1498  int rc;
1499 
1500  /* If the pci channel is offline, ignore possible errors, since
1501  * we cannot communicate with the pci card anyway.
1502  */
1503  if (pci_channel_offline(phba->pcidev))
1504  return;
1505  /* If resets are disabled then leave the HBA alone and return */
1506  if (!phba->cfg_enable_hba_reset)
1507  return;
1508 
1509  if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1510  switch (if_type) {
1511  case LPFC_SLI_INTF_IF_TYPE_0:
1512  pci_rd_rc1 = lpfc_readl(
1513  phba->sli4_hba.u.if_type0.UERRLOregaddr,
1514  &uerrlo_reg);
1515  pci_rd_rc2 = lpfc_readl(
1516  phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1517  &uemasklo_reg);
1518  /* consider PCI bus read error as pci_channel_offline */
1519  if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1520  return;
1521  lpfc_sli4_offline_eratt(phba);
1522  break;
1523  case LPFC_SLI_INTF_IF_TYPE_2:
1524  pci_rd_rc1 = lpfc_readl(
1525  phba->sli4_hba.u.if_type2.STATUSregaddr,
1526  &portstat_reg.word0);
1527  /* consider PCI bus read error as pci_channel_offline */
1528  if (pci_rd_rc1 == -EIO) {
1529  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1530  "3151 PCI bus read access failure: x%x\n",
1531  readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1532  return;
1533  }
1534  reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1535  reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1536  if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1537  /* TODO: Register for Overtemp async events. */
1538  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1539  "2889 Port Overtemperature event, "
1540  "taking port offline\n");
1541  spin_lock_irq(&phba->hbalock);
1542  phba->over_temp_state = HBA_OVER_TEMP;
1543  spin_unlock_irq(&phba->hbalock);
1544  lpfc_sli4_offline_eratt(phba);
1545  break;
1546  }
1547  if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1548  reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
1549  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1550  "3143 Port Down: Firmware Restarted\n");
1551  else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1552  reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1553  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1554  "3144 Port Down: Debug Dump\n");
1555  else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1556  reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1557  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1558  "3145 Port Down: Provisioning\n");
1559 
1560  /* Check port status register for function reset */
1561  rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
1562  if (rc == 0) {
1563  /* don't report event on forced debug dump */
1564  if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1565  reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1566  return;
1567  else
1568  break;
1569  }
1570  /* fall through for not able to recover */
1571  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1572  "3152 Unrecoverable error, bring the port "
1573  "offline\n");
1574  lpfc_sli4_offline_eratt(phba);
1575  break;
1576  case LPFC_SLI_INTF_IF_TYPE_1:
1577  default:
1578  break;
1579  }
1580  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1581  "3123 Report dump event to upper layer\n");
1582  /* Send an internal error event to mgmt application */
1583  lpfc_board_errevt_to_mgmt(phba);
1584 
1585  event_data = FC_REG_DUMP_EVENT;
1586  shost = lpfc_shost_from_vport(vport);
1587  fc_host_post_vendor_event(shost, fc_get_event_number(),
1588  sizeof(event_data), (char *) &event_data,
1589  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1590 }
1591 
1603 void
1604 lpfc_handle_eratt(struct lpfc_hba *phba)
1605 {
1606  (*phba->lpfc_handle_eratt)(phba);
1607 }
1608 
1616 void
1617 lpfc_handle_latt(struct lpfc_hba *phba)
1618 {
1619  struct lpfc_vport *vport = phba->pport;
1620  struct lpfc_sli *psli = &phba->sli;
1621  LPFC_MBOXQ_t *pmb;
1622  volatile uint32_t control;
1623  struct lpfc_dmabuf *mp;
1624  int rc = 0;
1625 
1626  pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1627  if (!pmb) {
1628  rc = 1;
1629  goto lpfc_handle_latt_err_exit;
1630  }
1631 
1632  mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1633  if (!mp) {
1634  rc = 2;
1635  goto lpfc_handle_latt_free_pmb;
1636  }
1637 
1638  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1639  if (!mp->virt) {
1640  rc = 3;
1641  goto lpfc_handle_latt_free_mp;
1642  }
1643 
1644  /* Cleanup any outstanding ELS commands */
1645  lpfc_els_flush_all_cmd(phba);
1646 
1647  psli->slistat.link_event++;
1648  lpfc_read_topology(phba, pmb, mp);
1649  pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1650  pmb->vport = vport;
1651  /* Block ELS IOCBs until we have processed this mbox command */
1652  phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1653  rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1654  if (rc == MBX_NOT_FINISHED) {
1655  rc = 4;
1656  goto lpfc_handle_latt_free_mbuf;
1657  }
1658 
1659  /* Clear Link Attention in HA REG */
1660  spin_lock_irq(&phba->hbalock);
1661  writel(HA_LATT, phba->HAregaddr);
1662  readl(phba->HAregaddr); /* flush */
1663  spin_unlock_irq(&phba->hbalock);
1664 
1665  return;
1666 
1667 lpfc_handle_latt_free_mbuf:
1668  phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1669  lpfc_mbuf_free(phba, mp->virt, mp->phys);
1670 lpfc_handle_latt_free_mp:
1671  kfree(mp);
1672 lpfc_handle_latt_free_pmb:
1673  mempool_free(pmb, phba->mbox_mem_pool);
1674 lpfc_handle_latt_err_exit:
1675  /* Enable Link attention interrupts */
1676  spin_lock_irq(&phba->hbalock);
1677  psli->sli_flag |= LPFC_PROCESS_LA;
1678  control = readl(phba->HCregaddr);
1679  control |= HC_LAINT_ENA;
1680  writel(control, phba->HCregaddr);
1681  readl(phba->HCregaddr); /* flush */
1682 
1683  /* Clear Link Attention in HA REG */
1684  writel(HA_LATT, phba->HAregaddr);
1685  readl(phba->HAregaddr); /* flush */
1686  spin_unlock_irq(&phba->hbalock);
1687  lpfc_linkdown(phba);
1688  phba->link_state = LPFC_HBA_ERROR;
1689 
1690  lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1691  "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1692 
1693  return;
1694 }
1695 
1710 int
1711 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1712 {
1713  uint8_t lenlo, lenhi;
1714  int Length;
1715  int i, j;
1716  int finished = 0;
1717  int index = 0;
1718 
1719  if (!vpd)
1720  return 0;
1721 
1722  /* Vital Product */
1723  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1724  "0455 Vital Product Data: x%x x%x x%x x%x\n",
1725  (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1726  (uint32_t) vpd[3]);
1727  while (!finished && (index < (len - 4))) {
1728  switch (vpd[index]) {
1729  case 0x82:
1730  case 0x91:
1731  index += 1;
1732  lenlo = vpd[index];
1733  index += 1;
1734  lenhi = vpd[index];
1735  index += 1;
1736  i = ((((unsigned short)lenhi) << 8) + lenlo);
1737  index += i;
1738  break;
1739  case 0x90:
1740  index += 1;
1741  lenlo = vpd[index];
1742  index += 1;
1743  lenhi = vpd[index];
1744  index += 1;
1745  Length = ((((unsigned short)lenhi) << 8) + lenlo);
1746  if (Length > len - index)
1747  Length = len - index;
1748  while (Length > 0) {
1749  /* Look for Serial Number */
1750  if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1751  index += 2;
1752  i = vpd[index];
1753  index += 1;
1754  j = 0;
1755  Length -= (3+i);
1756  while(i--) {
1757  phba->SerialNumber[j++] = vpd[index++];
1758  if (j == 31)
1759  break;
1760  }
1761  phba->SerialNumber[j] = 0;
1762  continue;
1763  }
1764  else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1765  phba->vpd_flag |= VPD_MODEL_DESC;
1766  index += 2;
1767  i = vpd[index];
1768  index += 1;
1769  j = 0;
1770  Length -= (3+i);
1771  while(i--) {
1772  phba->ModelDesc[j++] = vpd[index++];
1773  if (j == 255)
1774  break;
1775  }
1776  phba->ModelDesc[j] = 0;
1777  continue;
1778  }
1779  else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1780  phba->vpd_flag |= VPD_MODEL_NAME;
1781  index += 2;
1782  i = vpd[index];
1783  index += 1;
1784  j = 0;
1785  Length -= (3+i);
1786  while(i--) {
1787  phba->ModelName[j++] = vpd[index++];
1788  if (j == 79)
1789  break;
1790  }
1791  phba->ModelName[j] = 0;
1792  continue;
1793  }
1794  else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1795  phba->vpd_flag |= VPD_PROGRAM_TYPE;
1796  index += 2;
1797  i = vpd[index];
1798  index += 1;
1799  j = 0;
1800  Length -= (3+i);
1801  while(i--) {
1802  phba->ProgramType[j++] = vpd[index++];
1803  if (j == 255)
1804  break;
1805  }
1806  phba->ProgramType[j] = 0;
1807  continue;
1808  }
1809  else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1810  phba->vpd_flag |= VPD_PORT;
1811  index += 2;
1812  i = vpd[index];
1813  index += 1;
1814  j = 0;
1815  Length -= (3+i);
1816  while(i--) {
1817  if ((phba->sli_rev == LPFC_SLI_REV4) &&
1818  (phba->sli4_hba.pport_name_sta ==
1819  LPFC_SLI4_PPNAME_GET)) {
1820  j++;
1821  index++;
1822  } else
1823  phba->Port[j++] = vpd[index++];
1824  if (j == 19)
1825  break;
1826  }
1827  if ((phba->sli_rev != LPFC_SLI_REV4) ||
1828  (phba->sli4_hba.pport_name_sta ==
1829  LPFC_SLI4_PPNAME_NON))
1830  phba->Port[j] = 0;
1831  continue;
1832  }
1833  else {
1834  index += 2;
1835  i = vpd[index];
1836  index += 1;
1837  index += i;
1838  Length -= (3 + i);
1839  }
1840  }
1841  finished = 0;
1842  break;
1843  case 0x78:
1844  finished = 1;
1845  break;
1846  default:
1847  index ++;
1848  break;
1849  }
1850  }
1851 
1852  return(1);
1853 }
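
The parser above walks the standard PCI VPD tag structure: tags 0x82 (identifier string) and 0x91 are skipped via their two-byte little-endian length, the 0x90 read-only section holds keyword records of a two-character keyword plus a one-byte length, and tag 0x78 ends the data. A standalone sketch of the same walk that only prints the keywords (vpd_walk is a hypothetical demo with no bounds checking, not driver code):

#include <stdint.h>
#include <stdio.h>

static void vpd_walk(const uint8_t *vpd, int len)
{
	int index = 0, length;

	while (index < len - 4) {
		switch (vpd[index]) {
		case 0x82:			/* identifier string */
		case 0x91:			/* vendor large resource */
			length = vpd[index + 1] | (vpd[index + 2] << 8);
			index += 3 + length;
			break;
		case 0x90:			/* read-only keywords */
			length = vpd[index + 1] | (vpd[index + 2] << 8);
			index += 3;
			while (length > 0) {
				int klen = vpd[index + 2];

				printf("%c%c (%d bytes)\n",
				       vpd[index], vpd[index + 1], klen);
				index += 3 + klen;
				length -= 3 + klen;
			}
			break;
		case 0x78:			/* end tag */
			return;
		default:
			index++;
			break;
		}
	}
}

int main(void)
{
	const uint8_t vpd[] = { 0x90, 0x07, 0x00,
				'S', 'N', 0x04, '1', '2', '3', '4',
				0x78 };

	vpd_walk(vpd, sizeof(vpd));	/* prints: SN (4 bytes) */
	return 0;
}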
1854 
1867 static void
1868 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1869 {
1870  lpfc_vpd_t *vp;
1871  uint16_t dev_id = phba->pcidev->device;
1872  int max_speed;
1873  int GE = 0;
1874  int oneConnect = 0; /* default is not a oneConnect */
1875  struct {
1876  char *name;
1877  char *bus;
1878  char *function;
1879  } m = {"<Unknown>", "", ""};
1880 
1881  if (mdp && mdp[0] != '\0'
1882  && descp && descp[0] != '\0')
1883  return;
1884 
1885  if (phba->lmt & LMT_16Gb)
1886  max_speed = 16;
1887  else if (phba->lmt & LMT_10Gb)
1888  max_speed = 10;
1889  else if (phba->lmt & LMT_8Gb)
1890  max_speed = 8;
1891  else if (phba->lmt & LMT_4Gb)
1892  max_speed = 4;
1893  else if (phba->lmt & LMT_2Gb)
1894  max_speed = 2;
1895  else if (phba->lmt & LMT_1Gb)
1896  max_speed = 1;
1897  else
1898  max_speed = 0;
1899 
1900  vp = &phba->vpd;
1901 
1902  switch (dev_id) {
1903  case PCI_DEVICE_ID_FIREFLY:
1904  m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1905  break;
1906  case PCI_DEVICE_ID_SUPERFLY:
1907  if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1908  m = (typeof(m)){"LP7000", "PCI",
1909  "Fibre Channel Adapter"};
1910  else
1911  m = (typeof(m)){"LP7000E", "PCI",
1912  "Fibre Channel Adapter"};
1913  break;
1914  case PCI_DEVICE_ID_DRAGONFLY:
1915  m = (typeof(m)){"LP8000", "PCI",
1916  "Fibre Channel Adapter"};
1917  break;
1918  case PCI_DEVICE_ID_CENTAUR:
1919  if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1920  m = (typeof(m)){"LP9002", "PCI",
1921  "Fibre Channel Adapter"};
1922  else
1923  m = (typeof(m)){"LP9000", "PCI",
1924  "Fibre Channel Adapter"};
1925  break;
1926  case PCI_DEVICE_ID_RFLY:
1927  m = (typeof(m)){"LP952", "PCI",
1928  "Fibre Channel Adapter"};
1929  break;
1930  case PCI_DEVICE_ID_PEGASUS:
1931  m = (typeof(m)){"LP9802", "PCI-X",
1932  "Fibre Channel Adapter"};
1933  break;
1934  case PCI_DEVICE_ID_THOR:
1935  m = (typeof(m)){"LP10000", "PCI-X",
1936  "Fibre Channel Adapter"};
1937  break;
1938  case PCI_DEVICE_ID_VIPER:
1939  m = (typeof(m)){"LPX1000", "PCI-X",
1940  "Fibre Channel Adapter"};
1941  break;
1942  case PCI_DEVICE_ID_PFLY:
1943  m = (typeof(m)){"LP982", "PCI-X",
1944  "Fibre Channel Adapter"};
1945  break;
1946  case PCI_DEVICE_ID_TFLY:
1947  m = (typeof(m)){"LP1050", "PCI-X",
1948  "Fibre Channel Adapter"};
1949  break;
1950  case PCI_DEVICE_ID_HELIOS:
1951  m = (typeof(m)){"LP11000", "PCI-X2",
1952  "Fibre Channel Adapter"};
1953  break;
1954  case PCI_DEVICE_ID_HELIOS_SCSP:
1955  m = (typeof(m)){"LP11000-SP", "PCI-X2",
1956  "Fibre Channel Adapter"};
1957  break;
1958  case PCI_DEVICE_ID_HELIOS_DCSP:
1959  m = (typeof(m)){"LP11002-SP", "PCI-X2",
1960  "Fibre Channel Adapter"};
1961  break;
1962  case PCI_DEVICE_ID_NEPTUNE:
1963  m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1964  break;
1965  case PCI_DEVICE_ID_NEPTUNE_SCSP:
1966  m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1967  break;
1968  case PCI_DEVICE_ID_NEPTUNE_DCSP:
1969  m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1970  break;
1971  case PCI_DEVICE_ID_BMID:
1972  m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1973  break;
1974  case PCI_DEVICE_ID_BSMB:
1975  m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1976  break;
1977  case PCI_DEVICE_ID_ZEPHYR:
1978  m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1979  break;
1980  case PCI_DEVICE_ID_ZEPHYR_SCSP:
1981  m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1982  break;
1983  case PCI_DEVICE_ID_ZEPHYR_DCSP:
1984  m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1985  GE = 1;
1986  break;
1987  case PCI_DEVICE_ID_ZMID:
1988  m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1989  break;
1990  case PCI_DEVICE_ID_ZSMB:
1991  m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1992  break;
1993  case PCI_DEVICE_ID_LP101:
1994  m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1995  break;
1996  case PCI_DEVICE_ID_LP10000S:
1997  m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1998  break;
1999  case PCI_DEVICE_ID_LP11000S:
2000  m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
2001  break;
2002  case PCI_DEVICE_ID_LPE11000S:
2003  m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
2004  break;
2005  case PCI_DEVICE_ID_SAT:
2006  m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2007  break;
2008  case PCI_DEVICE_ID_SAT_MID:
2009  m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2010  break;
2011  case PCI_DEVICE_ID_SAT_SMB:
2012  m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2013  break;
2014  case PCI_DEVICE_ID_SAT_DCSP:
2015  m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2016  break;
2017  case PCI_DEVICE_ID_SAT_SCSP:
2018  m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2019  break;
2020  case PCI_DEVICE_ID_SAT_S:
2021  m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2022  break;
2023  case PCI_DEVICE_ID_HORNET:
2024  m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
2025  GE = 1;
2026  break;
2027  case PCI_DEVICE_ID_PROTEUS_VF:
2028  m = (typeof(m)){"LPev12000", "PCIe IOV",
2029  "Fibre Channel Adapter"};
2030  break;
2031  case PCI_DEVICE_ID_PROTEUS_PF:
2032  m = (typeof(m)){"LPev12000", "PCIe IOV",
2033  "Fibre Channel Adapter"};
2034  break;
2035  case PCI_DEVICE_ID_PROTEUS_S:
2036  m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2037  "Fibre Channel Adapter"};
2038  break;
2039  case PCI_DEVICE_ID_TIGERSHARK:
2040  oneConnect = 1;
2041  m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2042  break;
2043  case PCI_DEVICE_ID_TOMCAT:
2044  oneConnect = 1;
2045  m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2046  break;
2047  case PCI_DEVICE_ID_FALCON:
2048  m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2049  "EmulexSecure Fibre"};
2050  break;
2051  case PCI_DEVICE_ID_BALIUS:
2052  m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2053  "Fibre Channel Adapter"};
2054  break;
2055  case PCI_DEVICE_ID_LANCER_FC:
2056  case PCI_DEVICE_ID_LANCER_FC_VF:
2057  m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2058  break;
2059  case PCI_DEVICE_ID_LANCER_FCOE:
2060  case PCI_DEVICE_ID_LANCER_FCOE_VF:
2061  oneConnect = 1;
2062  m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2063  break;
2064  case PCI_DEVICE_ID_SKYHAWK:
2065  case PCI_DEVICE_ID_SKYHAWK_VF:
2066  oneConnect = 1;
2067  m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2068  break;
2069  default:
2070  m = (typeof(m)){"Unknown", "", ""};
2071  break;
2072  }
2073 
2074  if (mdp && mdp[0] == '\0')
2075  snprintf(mdp, 79,"%s", m.name);
2076  /*
2077  * OneConnect HBAs require special processing; they are all initiators
2078  * and we put the port number on the end
2079  */
2080  if (descp && descp[0] == '\0') {
2081  if (oneConnect)
2082  snprintf(descp, 255,
2083  "Emulex OneConnect %s, %s Initiator %s",
2084  m.name, m.function,
2085  phba->Port);
2086  else if (max_speed == 0)
2087  snprintf(descp, 255,
2088  "Emulex %s %s %s ",
2089  m.name, m.bus, m.function);
2090  else
2091  snprintf(descp, 255,
2092  "Emulex %s %d%s %s %s",
2093  m.name, max_speed, (GE) ? "GE" : "Gb",
2094  m.bus, m.function);
2095  }
2096 }
2097 
2110 int
2111 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2112 {
2113  IOCB_t *icmd;
2114  struct lpfc_iocbq *iocb;
2115  struct lpfc_dmabuf *mp1, *mp2;
2116 
2117  cnt += pring->missbufcnt;
2118 
2119  /* While there are buffers to post */
2120  while (cnt > 0) {
2121  /* Allocate buffer for command iocb */
2122  iocb = lpfc_sli_get_iocbq(phba);
2123  if (iocb == NULL) {
2124  pring->missbufcnt = cnt;
2125  return cnt;
2126  }
2127  icmd = &iocb->iocb;
2128 
2129  /* 2 buffers can be posted per command */
2130  /* Allocate buffer to post */
2131  mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2132  if (mp1)
2133  mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2134  if (!mp1 || !mp1->virt) {
2135  kfree(mp1);
2136  lpfc_sli_release_iocbq(phba, iocb);
2137  pring->missbufcnt = cnt;
2138  return cnt;
2139  }
2140 
2141  INIT_LIST_HEAD(&mp1->list);
2142  /* Allocate buffer to post */
2143  if (cnt > 1) {
2144  mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2145  if (mp2)
2146  mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2147  &mp2->phys);
2148  if (!mp2 || !mp2->virt) {
2149  kfree(mp2);
2150  lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2151  kfree(mp1);
2152  lpfc_sli_release_iocbq(phba, iocb);
2153  pring->missbufcnt = cnt;
2154  return cnt;
2155  }
2156 
2157  INIT_LIST_HEAD(&mp2->list);
2158  } else {
2159  mp2 = NULL;
2160  }
2161 
2162  icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2163  icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2164  icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2165  icmd->ulpBdeCount = 1;
2166  cnt--;
2167  if (mp2) {
2168  icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2169  icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2170  icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2171  cnt--;
2172  icmd->ulpBdeCount = 2;
2173  }
2174 
2175  icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2176  icmd->ulpLe = 1;
2177 
2178  if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2179  IOCB_ERROR) {
2180  lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2181  kfree(mp1);
2182  cnt++;
2183  if (mp2) {
2184  lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2185  kfree(mp2);
2186  cnt++;
2187  }
2188  lpfc_sli_release_iocbq(phba, iocb);
2189  pring->missbufcnt = cnt;
2190  return cnt;
2191  }
2192  lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2193  if (mp2)
2194  lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2195  }
2196  pring->missbufcnt = 0;
2197  return 0;
2198 }
2199 
2211 static int
2212 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2213 {
2214  struct lpfc_sli *psli = &phba->sli;
2215 
2216  /* Ring 0, ELS / CT buffers */
2217  lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2218  /* Ring 2 - FCP no buffers needed */
2219 
2220  return 0;
2221 }
2222 
2223 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2224 
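
The S(N,V) macro above is a 32-bit rotate-left: the value is shifted left by N with the bits that fall off the top wrapped back in at the bottom, the primitive SHA-1 is built on. A quick standalone check (rotl32 is a hypothetical equivalent, not driver code):

#include <assert.h>
#include <stdint.h>

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

static uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

int main(void)
{
	assert(S(1, 0x80000001u) == 0x00000003u);	/* top bit wraps around */
	assert(S(5, 0xdeadbeefu) == rotl32(0xdeadbeefu, 5));
	return 0;
}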
2232 static void
2233 lpfc_sha_init(uint32_t * HashResultPointer)
2234 {
2235  HashResultPointer[0] = 0x67452301;
2236  HashResultPointer[1] = 0xEFCDAB89;
2237  HashResultPointer[2] = 0x98BADCFE;
2238  HashResultPointer[3] = 0x10325476;
2239  HashResultPointer[4] = 0xC3D2E1F0;
2240 }
2241 
2252 static void
2253 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2254 {
2255  int t;
2256  uint32_t TEMP;
2257  uint32_t A, B, C, D, E;
2258  t = 16;
2259  do {
2260  HashWorkingPointer[t] =
2261  S(1,
2262  HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2263  8] ^
2264  HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2265  } while (++t <= 79);
2266  t = 0;
2267  A = HashResultPointer[0];
2268  B = HashResultPointer[1];
2269  C = HashResultPointer[2];
2270  D = HashResultPointer[3];
2271  E = HashResultPointer[4];
2272 
2273  do {
2274  if (t < 20) {
2275  TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2276  } else if (t < 40) {
2277  TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2278  } else if (t < 60) {
2279  TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2280  } else {
2281  TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2282  }
2283  TEMP += S(5, A) + E + HashWorkingPointer[t];
2284  E = D;
2285  D = C;
2286  C = S(30, B);
2287  B = A;
2288  A = TEMP;
2289  } while (++t <= 79);
2290 
2291  HashResultPointer[0] += A;
2292  HashResultPointer[1] += B;
2293  HashResultPointer[2] += C;
2294  HashResultPointer[3] += D;
2295  HashResultPointer[4] += E;
2296 
2297 }
2298 
2309 static void
2310 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2311 {
2312  *HashWorking = (*RandomChallenge ^ *HashWorking);
2313 }
2314 
2322 void
2323 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2324 {
2325  int t;
2326  uint32_t *HashWorking;
2327  uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2328 
2329  HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2330  if (!HashWorking)
2331  return;
2332 
2333  HashWorking[0] = HashWorking[78] = *pwwnn++;
2334  HashWorking[1] = HashWorking[79] = *pwwnn;
2335 
2336  for (t = 0; t < 7; t++)
2337  lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2338 
2339  lpfc_sha_init(hbainit);
2340  lpfc_sha_iterate(hbainit, HashWorking);
2341  kfree(HashWorking);
2342 }
2343 
2353 void
2354 lpfc_cleanup(struct lpfc_vport *vport)
2355 {
2356  struct lpfc_hba *phba = vport->phba;
2357  struct lpfc_nodelist *ndlp, *next_ndlp;
2358  int i = 0;
2359 
2360  if (phba->link_state > LPFC_LINK_DOWN)
2361  lpfc_port_link_failure(vport);
2362 
2363  list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2364  if (!NLP_CHK_NODE_ACT(ndlp)) {
2365  ndlp = lpfc_enable_node(vport, ndlp,
2366  NLP_STE_UNUSED_NODE);
2367  if (!ndlp)
2368  continue;
2369  spin_lock_irq(&phba->ndlp_lock);
2370  NLP_SET_FREE_REQ(ndlp);
2371  spin_unlock_irq(&phba->ndlp_lock);
2372  /* Trigger the release of the ndlp memory */
2373  lpfc_nlp_put(ndlp);
2374  continue;
2375  }
2376  spin_lock_irq(&phba->ndlp_lock);
2377  if (NLP_CHK_FREE_REQ(ndlp)) {
2378  /* The ndlp should not be in memory free mode already */
2379  spin_unlock_irq(&phba->ndlp_lock);
2380  continue;
2381  } else
2382  /* Indicate request for freeing ndlp memory */
2383  NLP_SET_FREE_REQ(ndlp);
2384  spin_unlock_irq(&phba->ndlp_lock);
2385 
2386  if (vport->port_type != LPFC_PHYSICAL_PORT &&
2387  ndlp->nlp_DID == Fabric_DID) {
2388  /* Just free up ndlp with Fabric_DID for vports */
2389  lpfc_nlp_put(ndlp);
2390  continue;
2391  }
2392 
2393  /* take care of nodes in unused state before the state
2394  * machine takes action.
2395  */
2396  if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2397  lpfc_nlp_put(ndlp);
2398  continue;
2399  }
2400 
2401  if (ndlp->nlp_type & NLP_FABRIC)
2402  lpfc_disc_state_machine(vport, ndlp, NULL,
2403  NLP_EVT_DEVICE_RECOVERY);
2404 
2405  lpfc_disc_state_machine(vport, ndlp, NULL,
2406  NLP_EVT_DEVICE_RM);
2407  }
2408 
2409  /* At this point, ALL ndlp's should be gone
2410  * because of the previous NLP_EVT_DEVICE_RM.
2411  * Lets wait for this to happen, if needed.
2412  */
2413  while (!list_empty(&vport->fc_nodes)) {
2414  if (i++ > 3000) {
2416  "0233 Nodelist not empty\n");
2417  list_for_each_entry_safe(ndlp, next_ndlp,
2418  &vport->fc_nodes, nlp_listp) {
2420  LOG_NODE,
2421  "0282 did:x%x ndlp:x%p "
2422  "usgmap:x%x refcnt:%d\n",
2423  ndlp->nlp_DID, (void *)ndlp,
2424  ndlp->nlp_usg_map,
2425  atomic_read(
2426  &ndlp->kref.refcount));
2427  }
2428  break;
2429  }
2430 
2431  /* Wait for any activity on ndlps to settle */
2432  msleep(10);
2433  }
2435 }
2436 
2445 void
2446  lpfc_stop_vport_timers(struct lpfc_vport *vport)
2447  {
2448  del_timer_sync(&vport->els_tmofunc);
2449  del_timer_sync(&vport->fc_fdmitmo);
2451  lpfc_can_disctmo(vport);
2452  return;
2453 }
2454 
2462 void
2463  __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2464  {
2465  /* Clear pending FCF rediscovery wait flag */
2466  phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2467 
2468  /* Now, try to stop the timer */
2469  del_timer(&phba->fcf.redisc_wait);
2470 }
2471 
2481 void
2482  lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2483  {
2484  spin_lock_irq(&phba->hbalock);
2485  if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2486  /* FCF rediscovery timer already fired or stopped */
2487  spin_unlock_irq(&phba->hbalock);
2488  return;
2489  }
2490  __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2491  /* Clear failover in progress flags */
2492  phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2493  spin_unlock_irq(&phba->hbalock);
2494 }
2495 
2503 void
2504  lpfc_stop_hba_timers(struct lpfc_hba *phba)
2505  {
2506  lpfc_stop_vport_timers(phba->pport);
2507  del_timer_sync(&phba->sli.mbox_tmo);
2508  del_timer_sync(&phba->fabric_block_timer);
2509  del_timer_sync(&phba->eratt_poll);
2510  del_timer_sync(&phba->hb_tmofunc);
2511  if (phba->sli_rev == LPFC_SLI_REV4) {
2512  del_timer_sync(&phba->rrq_tmr);
2513  phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2514  }
2515  phba->hb_outstanding = 0;
2516 
2517  switch (phba->pci_dev_grp) {
2518  case LPFC_PCI_DEV_LP:
2519  /* Stop any LightPulse device specific driver timers */
2520  del_timer_sync(&phba->fcp_poll_timer);
2521  break;
2522  case LPFC_PCI_DEV_OC:
2523  /* Stop any OneConnect device specific driver timers */
2524  __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2525  break;
2526  default:
2528  "0297 Invalid device group (x%x)\n",
2529  phba->pci_dev_grp);
2530  break;
2531  }
2532  return;
2533 }
2534 
2545 static void
2546 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2547 {
2548  unsigned long iflag;
2549  uint8_t actcmd = MBX_HEARTBEAT;
2550  unsigned long timeout;
2551 
2552  spin_lock_irqsave(&phba->hbalock, iflag);
2553  phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2554  spin_unlock_irqrestore(&phba->hbalock, iflag);
2555  if (mbx_action == LPFC_MBX_NO_WAIT)
2556  return;
2557  timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2558  spin_lock_irqsave(&phba->hbalock, iflag);
2559  if (phba->sli.mbox_active) {
2560  actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2561  /* Determine how long we might wait for the active mailbox
2562  * command to be gracefully completed by firmware.
2563  */
2564  timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2565  phba->sli.mbox_active) * 1000) + jiffies;
2566  }
2567  spin_unlock_irqrestore(&phba->hbalock, iflag);
2568 
2569  /* Wait for the outstanding mailbox command to complete */
2570  while (phba->sli.mbox_active) {
2571  /* Check active mailbox complete status every 2ms */
2572  msleep(2);
2573  if (time_after(jiffies, timeout)) {
2575  "2813 Mgmt IO is Blocked %x "
2576  "- mbox cmd %x still active\n",
2577  phba->sli.sli_flag, actcmd);
2578  break;
2579  }
2580  }
2581 }
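/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] The wait loop
 * above uses the standard jiffies deadline idiom: compute an expiry once,
 * poll, and bail out with time_after() so jiffies wrap-around is handled
 * correctly. A generic sketch of the same pattern (condition() is a
 * hypothetical stand-in):
 */
#if 0	/* example only; never compiled into the driver */
unsigned long timeout = msecs_to_jiffies(5000) + jiffies;

while (!condition()) {
	/* re-check every 2ms, like the mailbox poll above */
	msleep(2);
	if (time_after(jiffies, timeout))
		break;		/* deadline passed - give up */
}
#endif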
2582 
2591 void
2592  lpfc_sli4_node_prep(struct lpfc_hba *phba)
2593  {
2594  struct lpfc_nodelist *ndlp, *next_ndlp;
2595  struct lpfc_vport **vports;
2596  int i;
2597 
2598  if (phba->sli_rev != LPFC_SLI_REV4)
2599  return;
2600 
2601  vports = lpfc_create_vport_work_array(phba);
2602  if (vports != NULL) {
2603  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2604  if (vports[i]->load_flag & FC_UNLOADING)
2605  continue;
2606 
2607  list_for_each_entry_safe(ndlp, next_ndlp,
2608  &vports[i]->fc_nodes,
2609  nlp_listp) {
2610  if (NLP_CHK_NODE_ACT(ndlp))
2611  ndlp->nlp_rpi =
2612  lpfc_sli4_alloc_rpi(phba);
2613  }
2614  }
2615  }
2616  lpfc_destroy_vport_work_array(phba, vports);
2617 }
2618 
2631 int
2632 lpfc_online(struct lpfc_hba *phba)
2633 {
2634  struct lpfc_vport *vport;
2635  struct lpfc_vport **vports;
2636  int i;
2637 
2638  if (!phba)
2639  return 0;
2640  vport = phba->pport;
2641 
2642  if (!(vport->fc_flag & FC_OFFLINE_MODE))
2643  return 0;
2644 
2646  "0458 Bring Adapter online\n");
2647 
2648  lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2649 
2650  if (!lpfc_sli_queue_setup(phba)) {
2651  lpfc_unblock_mgmt_io(phba);
2652  return 1;
2653  }
2654 
2655  if (phba->sli_rev == LPFC_SLI_REV4) {
2656  if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2657  lpfc_unblock_mgmt_io(phba);
2658  return 1;
2659  }
2660  } else {
2661  if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2662  lpfc_unblock_mgmt_io(phba);
2663  return 1;
2664  }
2665  }
2666 
2667  vports = lpfc_create_vport_work_array(phba);
2668  if (vports != NULL)
2669  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2670  struct Scsi_Host *shost;
2671  shost = lpfc_shost_from_vport(vports[i]);
2672  spin_lock_irq(shost->host_lock);
2673  vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2675  vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2676  if (phba->sli_rev == LPFC_SLI_REV4)
2677  vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2678  spin_unlock_irq(shost->host_lock);
2679  }
2680  lpfc_destroy_vport_work_array(phba, vports);
2681 
2682  lpfc_unblock_mgmt_io(phba);
2683  return 0;
2684 }
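/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] lpfc_online()
 * shows the driver's recurring vport iteration idiom: snapshot the active
 * vports into a work array, walk it NULL-terminated up to max_vports, then
 * release the snapshot. A generic sketch (do_something() is hypothetical):
 */
#if 0	/* example only; never compiled into the driver */
struct lpfc_vport **vports;
int i;

vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		do_something(vports[i]);	/* per-vport action */
lpfc_destroy_vport_work_array(phba, vports);
#endif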
2685 
2697 void
2698  lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2699  {
2700  unsigned long iflag;
2701 
2702  spin_lock_irqsave(&phba->hbalock, iflag);
2703  phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2704  spin_unlock_irqrestore(&phba->hbalock, iflag);
2705 }
2706 
2715 void
2716 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2717 {
2718  struct lpfc_vport *vport = phba->pport;
2719  struct lpfc_nodelist *ndlp, *next_ndlp;
2720  struct lpfc_vport **vports;
2721  struct Scsi_Host *shost;
2722  int i;
2723 
2724  if (vport->fc_flag & FC_OFFLINE_MODE)
2725  return;
2726 
2727  lpfc_block_mgmt_io(phba, mbx_action);
2728 
2729  lpfc_linkdown(phba);
2730 
2731  /* Issue an unreg_login to all nodes on all vports */
2732  vports = lpfc_create_vport_work_array(phba);
2733  if (vports != NULL) {
2734  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2735  if (vports[i]->load_flag & FC_UNLOADING)
2736  continue;
2737  shost = lpfc_shost_from_vport(vports[i]);
2738  spin_lock_irq(shost->host_lock);
2739  vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2740  vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2741  vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2742  spin_unlock_irq(shost->host_lock);
2743 
2744  shost = lpfc_shost_from_vport(vports[i]);
2745  list_for_each_entry_safe(ndlp, next_ndlp,
2746  &vports[i]->fc_nodes,
2747  nlp_listp) {
2748  if (!NLP_CHK_NODE_ACT(ndlp))
2749  continue;
2750  if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2751  continue;
2752  if (ndlp->nlp_type & NLP_FABRIC) {
2753  lpfc_disc_state_machine(vports[i], ndlp,
2754  NULL, NLP_EVT_DEVICE_RECOVERY);
2755  lpfc_disc_state_machine(vports[i], ndlp,
2756  NULL, NLP_EVT_DEVICE_RM);
2757  }
2758  spin_lock_irq(shost->host_lock);
2759  ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2760  spin_unlock_irq(shost->host_lock);
2761  /*
2762  * Whenever an SLI4 port goes offline, free the
2763  * RPI. Get a new RPI when the adapter port
2764  * comes back online.
2765  */
2766  if (phba->sli_rev == LPFC_SLI_REV4)
2767  lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2768  lpfc_unreg_rpi(vports[i], ndlp);
2769  }
2770  }
2771  }
2772  lpfc_destroy_vport_work_array(phba, vports);
2773 
2774  lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2775 }
2776 
2785 void
2786 lpfc_offline(struct lpfc_hba *phba)
2787 {
2788  struct Scsi_Host *shost;
2789  struct lpfc_vport **vports;
2790  int i;
2791 
2792  if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2793  return;
2794 
2795  /* stop port and all timers associated with this hba */
2796  lpfc_stop_port(phba);
2797  vports = lpfc_create_vport_work_array(phba);
2798  if (vports != NULL)
2799  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2800  lpfc_stop_vport_timers(vports[i]);
2801  lpfc_destroy_vport_work_array(phba, vports);
2803  "0460 Bring Adapter offline\n");
2804  /* Bring down the SLI Layer and cleanup. The HBA is offline
2805  now. */
2806  lpfc_sli_hba_down(phba);
2807  spin_lock_irq(&phba->hbalock);
2808  phba->work_ha = 0;
2809  spin_unlock_irq(&phba->hbalock);
2810  vports = lpfc_create_vport_work_array(phba);
2811  if (vports != NULL)
2812  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2813  shost = lpfc_shost_from_vport(vports[i]);
2814  spin_lock_irq(shost->host_lock);
2815  vports[i]->work_port_events = 0;
2816  vports[i]->fc_flag |= FC_OFFLINE_MODE;
2817  spin_unlock_irq(shost->host_lock);
2818  }
2819  lpfc_destroy_vport_work_array(phba, vports);
2820 }
2821 
2830 static void
2831 lpfc_scsi_free(struct lpfc_hba *phba)
2832 {
2833  struct lpfc_scsi_buf *sb, *sb_next;
2834  struct lpfc_iocbq *io, *io_next;
2835 
2836  spin_lock_irq(&phba->hbalock);
2837  /* Release all the lpfc_scsi_bufs maintained by this host. */
2838  spin_lock(&phba->scsi_buf_list_lock);
2839  list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2840  list_del(&sb->list);
2841  pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2842  sb->dma_handle);
2843  kfree(sb);
2844  phba->total_scsi_bufs--;
2845  }
2846  spin_unlock(&phba->scsi_buf_list_lock);
2847 
2848  /* Release all the lpfc_iocbq entries maintained by this host. */
2849  list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2850  list_del(&io->list);
2851  kfree(io);
2852  phba->total_iocbq_bufs--;
2853  }
2854 
2855  spin_unlock_irq(&phba->hbalock);
2856 }
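/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] Both loops in
 * lpfc_scsi_free() use list_for_each_entry_safe() because each entry is
 * unlinked and freed inside the loop body; the _safe variant caches the
 * next pointer before the current node is destroyed:
 */
#if 0	/* example only; never compiled into the driver */
struct lpfc_iocbq *io, *io_next;

list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
	list_del(&io->list);
	kfree(io);	/* plain list_for_each_entry would now walk freed memory */
}
#endif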
2857 
2870 int
2871  lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2872  {
2873  struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
2874  struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
2875  uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
2876  LIST_HEAD(els_sgl_list);
2877  LIST_HEAD(scsi_sgl_list);
2878  int rc;
2879 
2880  /*
2881  * update on pci function's els xri-sgl list
2882  */
2883  els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
2884  if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
2885  /* els xri-sgl expanded */
2886  xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
2888  "3157 ELS xri-sgl count increased from "
2889  "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2890  els_xri_cnt);
2891  /* allocate the additional els sgls */
2892  for (i = 0; i < xri_cnt; i++) {
2893  sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
2894  GFP_KERNEL);
2895  if (sglq_entry == NULL) {
2897  "2562 Failure to allocate an "
2898  "ELS sgl entry:%d\n", i);
2899  rc = -ENOMEM;
2900  goto out_free_mem;
2901  }
2902  sglq_entry->buff_type = GEN_BUFF_TYPE;
2903  sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
2904  &sglq_entry->phys);
2905  if (sglq_entry->virt == NULL) {
2906  kfree(sglq_entry);
2908  "2563 Failure to allocate an "
2909  "ELS mbuf:%d\n", i);
2910  rc = -ENOMEM;
2911  goto out_free_mem;
2912  }
2913  sglq_entry->sgl = sglq_entry->virt;
2914  memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
2915  sglq_entry->state = SGL_FREED;
2916  list_add_tail(&sglq_entry->list, &els_sgl_list);
2917  }
2918  spin_lock(&phba->hbalock);
2919  list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2920  spin_unlock(&phba->hbalock);
2921  } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2922  /* els xri-sgl shrunk */
2923  xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
2925  "3158 ELS xri-sgl count decreased from "
2926  "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2927  els_xri_cnt);
2928  spin_lock_irq(&phba->hbalock);
2929  list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
2930  spin_unlock_irq(&phba->hbalock);
2931  /* release extra els sgls from list */
2932  for (i = 0; i < xri_cnt; i++) {
2933  list_remove_head(&els_sgl_list,
2934  sglq_entry, struct lpfc_sglq, list);
2935  if (sglq_entry) {
2936  lpfc_mbuf_free(phba, sglq_entry->virt,
2937  sglq_entry->phys);
2938  kfree(sglq_entry);
2939  }
2940  }
2941  spin_lock_irq(&phba->hbalock);
2942  list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2943  spin_unlock_irq(&phba->hbalock);
2944  } else
2946  "3163 ELS xri-sgl count unchanged: %d\n",
2947  els_xri_cnt);
2948  phba->sli4_hba.els_xri_cnt = els_xri_cnt;
2949 
2950  /* update xris to els sgls on the list */
2951  sglq_entry = NULL;
2952  sglq_entry_next = NULL;
2953  list_for_each_entry_safe(sglq_entry, sglq_entry_next,
2954  &phba->sli4_hba.lpfc_sgl_list, list) {
2955  lxri = lpfc_sli4_next_xritag(phba);
2956  if (lxri == NO_XRI) {
2958  "2400 Failed to allocate xri for "
2959  "ELS sgl\n");
2960  rc = -ENOMEM;
2961  goto out_free_mem;
2962  }
2963  sglq_entry->sli4_lxritag = lxri;
2964  sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2965  }
2966 
2967  /*
2968  * update on pci function's allocated scsi xri-sgl list
2969  */
2970  phba->total_scsi_bufs = 0;
2971 
2972  /* maximum number of xris available for scsi buffers */
2973  phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
2974  els_xri_cnt;
2975 
2977  "2401 Current allocated SCSI xri-sgl count:%d, "
2978  "maximum SCSI xri count:%d\n",
2979  phba->sli4_hba.scsi_xri_cnt,
2980  phba->sli4_hba.scsi_xri_max);
2981 
2982  spin_lock_irq(&phba->scsi_buf_list_lock);
2983  list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
2984  spin_unlock_irq(&phba->scsi_buf_list_lock);
2985 
2986  if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
2987  /* max scsi xri shrunk below the allocated scsi buffers */
2988  scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
2989  phba->sli4_hba.scsi_xri_max;
2990  /* release the extra allocated scsi buffers */
2991  for (i = 0; i < scsi_xri_cnt; i++) {
2992  list_remove_head(&scsi_sgl_list, psb,
2993  struct lpfc_scsi_buf, list);
2994  pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
2995  psb->dma_handle);
2996  kfree(psb);
2997  }
2998  spin_lock_irq(&phba->scsi_buf_list_lock);
2999  phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3000  spin_unlock_irq(&phba->scsi_buf_list_lock);
3001  }
3002 
3003  /* update xris associated to remaining allocated scsi buffers */
3004  psb = NULL;
3005  psb_next = NULL;
3006  list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3007  lxri = lpfc_sli4_next_xritag(phba);
3008  if (lxri == NO_XRI) {
3010  "2560 Failed to allocate xri for "
3011  "scsi buffer\n");
3012  rc = -ENOMEM;
3013  goto out_free_mem;
3014  }
3015  psb->cur_iocbq.sli4_lxritag = lxri;
3016  psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3017  }
3018  spin_lock(&phba->scsi_buf_list_lock);
3019  list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
3020  spin_unlock(&phba->scsi_buf_list_lock);
3021 
3022  return 0;
3023 
3024 out_free_mem:
3025  lpfc_free_els_sgl_list(phba);
3026  lpfc_scsi_free(phba);
3027  return rc;
3028 }
3029 
3046 struct lpfc_vport *
3047 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3048 {
3049  struct lpfc_vport *vport;
3050  struct Scsi_Host *shost;
3051  int error = 0;
3052 
3053  if (dev != &phba->pcidev->dev)
3054  shost = scsi_host_alloc(&lpfc_vport_template,
3055  sizeof(struct lpfc_vport));
3056  else
3057  shost = scsi_host_alloc(&lpfc_template,
3058  sizeof(struct lpfc_vport));
3059  if (!shost)
3060  goto out;
3061 
3062  vport = (struct lpfc_vport *) shost->hostdata;
3063  vport->phba = phba;
3064  vport->load_flag |= FC_LOADING;
3065  vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3066  vport->fc_rscn_flush = 0;
3067 
3068  lpfc_get_vport_cfgparam(vport);
3069  shost->unique_id = instance;
3070  shost->max_id = LPFC_MAX_TARGET;
3071  shost->max_lun = vport->cfg_max_luns;
3072  shost->this_id = -1;
3073  shost->max_cmd_len = 16;
3074  if (phba->sli_rev == LPFC_SLI_REV4) {
3075  shost->dma_boundary =
3076  phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3077  shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3078  }
3079 
3080  /*
3081  * Set initial can_queue value since 0 is no longer supported and
3082  * scsi_add_host will fail. This will be adjusted later based on the
3083  * max xri value determined in hba setup.
3084  */
3085  shost->can_queue = phba->cfg_hba_queue_depth - 10;
3086  if (dev != &phba->pcidev->dev) {
3087  shost->transportt = lpfc_vport_transport_template;
3088  vport->port_type = LPFC_NPIV_PORT;
3089  } else {
3090  shost->transportt = lpfc_transport_template;
3091  vport->port_type = LPFC_PHYSICAL_PORT;
3092  }
3093 
3094  /* Initialize all internally managed lists. */
3095  INIT_LIST_HEAD(&vport->fc_nodes);
3096  INIT_LIST_HEAD(&vport->rcv_buffer_list);
3097  spin_lock_init(&vport->work_port_lock);
3098 
3099  init_timer(&vport->fc_disctmo);
3100  vport->fc_disctmo.function = lpfc_disc_timeout;
3101  vport->fc_disctmo.data = (unsigned long)vport;
3102 
3103  init_timer(&vport->fc_fdmitmo);
3104  vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
3105  vport->fc_fdmitmo.data = (unsigned long)vport;
3106 
3107  init_timer(&vport->els_tmofunc);
3108  vport->els_tmofunc.function = lpfc_els_timeout;
3109  vport->els_tmofunc.data = (unsigned long)vport;
3110 
3111  init_timer(&vport->delayed_disc_tmo);
3112  vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3113  vport->delayed_disc_tmo.data = (unsigned long)vport;
3114 
3115  error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3116  if (error)
3117  goto out_put_shost;
3118 
3119  spin_lock_irq(&phba->hbalock);
3120  list_add_tail(&vport->listentry, &phba->port_list);
3121  spin_unlock_irq(&phba->hbalock);
3122  return vport;
3123 
3124 out_put_shost:
3125  scsi_host_put(shost);
3126 out:
3127  return NULL;
3128 }
3129 
3137 void
3138 destroy_port(struct lpfc_vport *vport)
3139 {
3140  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3141  struct lpfc_hba *phba = vport->phba;
3142 
3143  lpfc_debugfs_terminate(vport);
3144  fc_remove_host(shost);
3145  scsi_remove_host(shost);
3146 
3147  spin_lock_irq(&phba->hbalock);
3148  list_del_init(&vport->listentry);
3149  spin_unlock_irq(&phba->hbalock);
3150 
3151  lpfc_cleanup(vport);
3152  return;
3153 }
3154 
3165 int
3166  lpfc_get_instance(void)
3167  {
3168  int instance = 0;
3169 
3170  /* Assign an unused number */
3171  if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
3172  return -1;
3173  if (idr_get_new(&lpfc_hba_index, NULL, &instance))
3174  return -1;
3175  return instance;
3176 }
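/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] This is the
 * classic two-step IDR allocation of pre-3.9 kernels: preload memory with
 * idr_pre_get(), then take an id with idr_get_new(). A generic sketch,
 * including the matching release:
 */
#if 0	/* example only; never compiled into the driver */
int id;

if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))	/* reserve memory */
	return -1;
if (idr_get_new(&lpfc_hba_index, NULL, &id))	/* grab next free id */
	return -1;
/* ... when the adapter goes away: idr_remove(&lpfc_hba_index, id); */
#endif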
3177 
3193 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3194 {
3195  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3196  struct lpfc_hba *phba = vport->phba;
3197  int stat = 0;
3198 
3199  spin_lock_irq(shost->host_lock);
3200 
3201  if (vport->load_flag & FC_UNLOADING) {
3202  stat = 1;
3203  goto finished;
3204  }
3205  if (time >= 30 * HZ) {
3207  "0461 Scanning longer than 30 "
3208  "seconds. Continuing initialization\n");
3209  stat = 1;
3210  goto finished;
3211  }
3212  if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
3214  "0465 Link down longer than 15 "
3215  "seconds. Continuing initialization\n");
3216  stat = 1;
3217  goto finished;
3218  }
3219 
3220  if (vport->port_state != LPFC_VPORT_READY)
3221  goto finished;
3222  if (vport->num_disc_nodes || vport->fc_prli_sent)
3223  goto finished;
3224  if (vport->fc_map_cnt == 0 && time < 2 * HZ)
3225  goto finished;
3226  if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3227  goto finished;
3228 
3229  stat = 1;
3230 
3231 finished:
3232  spin_unlock_irq(shost->host_lock);
3233  return stat;
3234 }
3235 
3242  void
3243  lpfc_host_attrib_init(struct Scsi_Host *shost)
3244  {
3245  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3246  struct lpfc_hba *phba = vport->phba;
3247  /*
3248  * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3249  */
3250 
3251  fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3252  fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3254 
3255  memset(fc_host_supported_fc4s(shost), 0,
3256  sizeof(fc_host_supported_fc4s(shost)));
3257  fc_host_supported_fc4s(shost)[2] = 1;
3258  fc_host_supported_fc4s(shost)[7] = 1;
3259 
3261  sizeof fc_host_symbolic_name(shost));
3262 
3263  fc_host_supported_speeds(shost) = 0;
3264  if (phba->lmt & LMT_16Gb)
3266  if (phba->lmt & LMT_10Gb)
3268  if (phba->lmt & LMT_8Gb)
3270  if (phba->lmt & LMT_4Gb)
3272  if (phba->lmt & LMT_2Gb)
3274  if (phba->lmt & LMT_1Gb)
3276 
3277  fc_host_maxframe_size(shost) =
3278  (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3279  (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3280 
3281  fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3282 
3283  /* This value is also unchanging */
3284  memset(fc_host_active_fc4s(shost), 0,
3285  sizeof(fc_host_active_fc4s(shost)));
3286  fc_host_active_fc4s(shost)[2] = 1;
3287  fc_host_active_fc4s(shost)[7] = 1;
3288 
3289  fc_host_max_npiv_vports(shost) = phba->max_vpi;
3290  spin_lock_irq(shost->host_lock);
3291  vport->load_flag &= ~FC_LOADING;
3292  spin_unlock_irq(shost->host_lock);
3293 }
3294 
3303 static void
3304 lpfc_stop_port_s3(struct lpfc_hba *phba)
3305 {
3306  /* Clear all interrupt enable conditions */
3307  writel(0, phba->HCregaddr);
3308  readl(phba->HCregaddr); /* flush */
3309  /* Clear all pending interrupts */
3310  writel(0xffffffff, phba->HAregaddr);
3311  readl(phba->HAregaddr); /* flush */
3312 
3313  /* Reset some HBA SLI setup states */
3314  lpfc_stop_hba_timers(phba);
3315  phba->pport->work_port_events = 0;
3316 }
3317 
3326 static void
3327 lpfc_stop_port_s4(struct lpfc_hba *phba)
3328 {
3329  /* Reset some HBA SLI4 setup states */
3330  lpfc_stop_hba_timers(phba);
3331  phba->pport->work_port_events = 0;
3332  phba->sli4_hba.intr_enable = 0;
3333 }
3334 
3342 void
3343  lpfc_stop_port(struct lpfc_hba *phba)
3344  {
3345  phba->lpfc_stop_port(phba);
3346 }
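/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] lpfc_stop_port()
 * is a thin dispatcher: the SLI-rev specific handler was installed once at
 * probe time by the API table setup (see lpfc_api_table_setup() later in
 * this file). Conceptually:
 */
#if 0	/* example only; never compiled into the driver */
/* chosen once, based on the PCI device group / SLI revision ... */
phba->lpfc_stop_port = (phba->sli_rev == LPFC_SLI_REV4)
			? lpfc_stop_port_s4
			: lpfc_stop_port_s3;

/* ... so every caller stays revision-agnostic: */
phba->lpfc_stop_port(phba);
#endif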
3347 
3354 void
3355  lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3356  {
3357  unsigned long fcf_redisc_wait_tmo =
3358  (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3359  /* Start fcf rediscovery wait period timer */
3360  mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3361  spin_lock_irq(&phba->hbalock);
3362  /* Allow action to new fcf asynchronous event */
3363  phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3364  /* Mark the FCF rediscovery pending state */
3365  phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3366  spin_unlock_irq(&phba->hbalock);
3367 }
3368 
3379 void
3380  lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3381  {
3382  struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3383 
3384  /* Don't send FCF rediscovery event if timer cancelled */
3385  spin_lock_irq(&phba->hbalock);
3386  if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3387  spin_unlock_irq(&phba->hbalock);
3388  return;
3389  }
3390  /* Clear FCF rediscovery timer pending flag */
3391  phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3392  /* FCF rediscovery event to worker thread */
3393  phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3394  spin_unlock_irq(&phba->hbalock);
3396  "2776 FCF rediscover quiescent timer expired\n");
3397  /* wake up worker thread */
3398  lpfc_worker_wake_up(phba);
3399 }
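/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] The FCF
 * rediscovery timer uses the classic (pre-4.15) kernel timer API: the
 * handler receives an opaque unsigned long and casts it back to the hba
 * pointer, exactly as lpfc_sli4_fcf_redisc_wait_tmo() does above. Setup
 * and arming, as done elsewhere in this file:
 */
#if 0	/* example only; never compiled into the driver */
init_timer(&phba->fcf.redisc_wait);
phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
phba->fcf.redisc_wait.data = (unsigned long)phba;	/* handler casts back */

/* arm it for the quiescent period */
mod_timer(&phba->fcf.redisc_wait,
	  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
#endif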
3400 
3412 static uint16_t
3413 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3414  struct lpfc_acqe_link *acqe_link)
3415 {
3416  uint16_t latt_fault;
3417 
3418  switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3422  latt_fault = 0;
3423  break;
3424  default:
3426  "0398 Invalid link fault code: x%x\n",
3427  bf_get(lpfc_acqe_link_fault, acqe_link));
3428  latt_fault = MBXERR_ERROR;
3429  break;
3430  }
3431  return latt_fault;
3432 }
3433 
3444 static uint8_t
3445 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3446  struct lpfc_acqe_link *acqe_link)
3447 {
3448  uint8_t att_type;
3449 
3450  switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3453  att_type = LPFC_ATT_LINK_DOWN;
3454  break;
3456  /* Ignore physical link up events - wait for logical link up */
3457  att_type = LPFC_ATT_RESERVED;
3458  break;
3460  att_type = LPFC_ATT_LINK_UP;
3461  break;
3462  default:
3464  "0399 Invalid link attention type: x%x\n",
3465  bf_get(lpfc_acqe_link_status, acqe_link));
3466  att_type = LPFC_ATT_RESERVED;
3467  break;
3468  }
3469  return att_type;
3470 }
3471 
3482 static uint8_t
3483 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3484  struct lpfc_acqe_link *acqe_link)
3485 {
3486  uint8_t link_speed;
3487 
3488  switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3492  link_speed = LPFC_LINK_SPEED_UNKNOWN;
3493  break;
3495  link_speed = LPFC_LINK_SPEED_1GHZ;
3496  break;
3498  link_speed = LPFC_LINK_SPEED_10GHZ;
3499  break;
3500  default:
3502  "0483 Invalid link-attention link speed: x%x\n",
3503  bf_get(lpfc_acqe_link_speed, acqe_link));
3504  link_speed = LPFC_LINK_SPEED_UNKNOWN;
3505  break;
3506  }
3507  return link_speed;
3508 }
3509 
3518 uint32_t
3519  lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3520  {
3521  uint32_t link_speed;
3522 
3523  if (!lpfc_is_link_up(phba))
3524  return 0;
3525 
3526  switch (phba->fc_linkspeed) {
3527  case LPFC_LINK_SPEED_1GHZ:
3528  link_speed = 1000;
3529  break;
3530  case LPFC_LINK_SPEED_2GHZ:
3531  link_speed = 2000;
3532  break;
3533  case LPFC_LINK_SPEED_4GHZ:
3534  link_speed = 4000;
3535  break;
3536  case LPFC_LINK_SPEED_8GHZ:
3537  link_speed = 8000;
3538  break;
3539  case LPFC_LINK_SPEED_10GHZ:
3540  link_speed = 10000;
3541  break;
3542  case LPFC_LINK_SPEED_16GHZ:
3543  link_speed = 16000;
3544  break;
3545  default:
3546  link_speed = 0;
3547  }
3548  return link_speed;
3549 }
3550 
3562 static uint32_t
3563 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3564  uint8_t speed_code)
3565 {
3566  uint32_t port_speed;
3567 
3568  switch (evt_code) {
3570  switch (speed_code) {
3572  port_speed = 0;
3573  break;
3575  port_speed = 10;
3576  break;
3578  port_speed = 100;
3579  break;
3581  port_speed = 1000;
3582  break;
3584  port_speed = 10000;
3585  break;
3586  default:
3587  port_speed = 0;
3588  }
3589  break;
3590  case LPFC_TRAILER_CODE_FC:
3591  switch (speed_code) {
3593  port_speed = 0;
3594  break;
3596  port_speed = 1000;
3597  break;
3599  port_speed = 2000;
3600  break;
3602  port_speed = 4000;
3603  break;
3605  port_speed = 8000;
3606  break;
3608  port_speed = 10000;
3609  break;
3611  port_speed = 16000;
3612  break;
3613  default:
3614  port_speed = 0;
3615  }
3616  break;
3617  default:
3618  port_speed = 0;
3619  }
3620  return port_speed;
3621 }
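/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] The parser above
 * maps an event-specific speed code onto a plain Mb/s value, with 0 as the
 * catch-all for unrecognized codes. Typical use from an FC link event:
 */
#if 0	/* example only; never compiled into the driver */
uint32_t mbps;

/* e.g. an FC 8G speed code yields 8000; anything unrecognized yields 0 */
mbps = lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				  bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
#endif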
3622 
3630 static void
3631 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3632  struct lpfc_acqe_link *acqe_link)
3633 {
3634  struct lpfc_dmabuf *mp;
3635  LPFC_MBOXQ_t *pmb;
3636  MAILBOX_t *mb;
3637  struct lpfc_mbx_read_top *la;
3638  uint8_t att_type;
3639  int rc;
3640 
3641  att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3642  if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3643  return;
3644  phba->fcoe_eventtag = acqe_link->event_tag;
3645  pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3646  if (!pmb) {
3648  "0395 The mboxq allocation failed\n");
3649  return;
3650  }
3651  mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3652  if (!mp) {
3654  "0396 The lpfc_dmabuf allocation failed\n");
3655  goto out_free_pmb;
3656  }
3657  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3658  if (!mp->virt) {
3660  "0397 The mbuf allocation failed\n");
3661  goto out_free_dmabuf;
3662  }
3663 
3664  /* Cleanup any outstanding ELS commands */
3665  lpfc_els_flush_all_cmd(phba);
3666 
3667  /* Block ELS IOCBs until we are done processing the link event */
3668  phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3669 
3670  /* Update link event statistics */
3671  phba->sli.slistat.link_event++;
3672 
3673  /* Create lpfc_handle_latt mailbox command from link ACQE */
3674  lpfc_read_topology(phba, pmb, mp);
3675  pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3676  pmb->vport = phba->pport;
3677 
3678  /* Keep the link status for extra SLI4 state machine reference */
3679  phba->sli4_hba.link_state.speed =
3680  lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3681  bf_get(lpfc_acqe_link_speed, acqe_link));
3682  phba->sli4_hba.link_state.duplex =
3683  bf_get(lpfc_acqe_link_duplex, acqe_link);
3684  phba->sli4_hba.link_state.status =
3685  bf_get(lpfc_acqe_link_status, acqe_link);
3686  phba->sli4_hba.link_state.type =
3687  bf_get(lpfc_acqe_link_type, acqe_link);
3688  phba->sli4_hba.link_state.number =
3689  bf_get(lpfc_acqe_link_number, acqe_link);
3690  phba->sli4_hba.link_state.fault =
3691  bf_get(lpfc_acqe_link_fault, acqe_link);
3692  phba->sli4_hba.link_state.logical_speed =
3693  bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3694 
3696  "2900 Async FC/FCoE Link event - Speed:%dGBit "
3697  "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3698  "Logical speed:%dMbps Fault:%d\n",
3699  phba->sli4_hba.link_state.speed,
3700  phba->sli4_hba.link_state.topology,
3701  phba->sli4_hba.link_state.status,
3702  phba->sli4_hba.link_state.type,
3703  phba->sli4_hba.link_state.number,
3704  phba->sli4_hba.link_state.logical_speed,
3705  phba->sli4_hba.link_state.fault);
3706  /*
3707  * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3708  * topology info. Note: Optional for non FC-AL ports.
3709  */
3710  if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3711  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3712  if (rc == MBX_NOT_FINISHED)
3713  goto out_free_dmabuf;
3714  return;
3715  }
3716  /*
3717  * For FCoE Mode: fill in all the topology information we need and call
3718  * the READ_TOPOLOGY completion routine to continue without actually
3719  * sending the READ_TOPOLOGY mailbox command to the port.
3720  */
3721  /* Parse and translate status field */
3722  mb = &pmb->u.mb;
3723  mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3724 
3725  /* Parse and translate link attention fields */
3726  la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3727  la->eventTag = acqe_link->event_tag;
3728  bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3729  bf_set(lpfc_mbx_read_top_link_spd, la,
3730  lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3731 
3732  /* Fake the following irrelevant fields */
3733  bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3734  bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3735  bf_set(lpfc_mbx_read_top_il, la, 0);
3736  bf_set(lpfc_mbx_read_top_pb, la, 0);
3737  bf_set(lpfc_mbx_read_top_fa, la, 0);
3738  bf_set(lpfc_mbx_read_top_mm, la, 0);
3739 
3740  /* Invoke the lpfc_handle_latt mailbox command callback function */
3741  lpfc_mbx_cmpl_read_topology(phba, pmb);
3742 
3743  return;
3744 
3745 out_free_dmabuf:
3746  kfree(mp);
3747 out_free_pmb:
3748  mempool_free(pmb, phba->mbox_mem_pool);
3749 }
3750 
3760 static void
3761 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3762 {
3763  struct lpfc_dmabuf *mp;
3764  LPFC_MBOXQ_t *pmb;
3765  int rc;
3766 
3767  if (bf_get(lpfc_trailer_type, acqe_fc) !=
3770  "2895 Non FC link Event detected.(%d)\n",
3771  bf_get(lpfc_trailer_type, acqe_fc));
3772  return;
3773  }
3774  /* Keep the link status for extra SLI4 state machine reference */
3775  phba->sli4_hba.link_state.speed =
3776  lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3777  bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3778  phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3779  phba->sli4_hba.link_state.topology =
3780  bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3781  phba->sli4_hba.link_state.status =
3782  bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3783  phba->sli4_hba.link_state.type =
3784  bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3785  phba->sli4_hba.link_state.number =
3786  bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3787  phba->sli4_hba.link_state.fault =
3788  bf_get(lpfc_acqe_link_fault, acqe_fc);
3789  phba->sli4_hba.link_state.logical_speed =
3790  bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3792  "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3793  "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3794  "%dMbps Fault:%d\n",
3795  phba->sli4_hba.link_state.speed,
3796  phba->sli4_hba.link_state.topology,
3797  phba->sli4_hba.link_state.status,
3798  phba->sli4_hba.link_state.type,
3799  phba->sli4_hba.link_state.number,
3800  phba->sli4_hba.link_state.logical_speed,
3801  phba->sli4_hba.link_state.fault);
3802  pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3803  if (!pmb) {
3805  "2897 The mboxq allocation failed\n");
3806  return;
3807  }
3808  mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3809  if (!mp) {
3811  "2898 The lpfc_dmabuf allocation failed\n");
3812  goto out_free_pmb;
3813  }
3814  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3815  if (!mp->virt) {
3817  "2899 The mbuf allocation failed\n");
3818  goto out_free_dmabuf;
3819  }
3820 
3821  /* Cleanup any outstanding ELS commands */
3822  lpfc_els_flush_all_cmd(phba);
3823 
3824  /* Block ELS IOCBs until we are done processing the link event */
3825  phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3826 
3827  /* Update link event statistics */
3828  phba->sli.slistat.link_event++;
3829 
3830  /* Create lpfc_handle_latt mailbox command from link ACQE */
3831  lpfc_read_topology(phba, pmb, mp);
3832  pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3833  pmb->vport = phba->pport;
3834 
3835  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3836  if (rc == MBX_NOT_FINISHED)
3837  goto out_free_dmabuf;
3838  return;
3839 
3840 out_free_dmabuf:
3841  kfree(mp);
3842 out_free_pmb:
3843  mempool_free(pmb, phba->mbox_mem_pool);
3844 }
3845 
3853 static void
3854 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3855 {
3856  char port_name;
3857  char message[80];
3858  uint8_t status;
3859  struct lpfc_acqe_misconfigured_event *misconfigured;
3860 
3861  /* special case misconfigured event as it contains data for all ports */
3862  if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3864  (bf_get(lpfc_trailer_type, acqe_sli) !=
3867  "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3868  "x%08x SLI Event Type:%d\n",
3869  acqe_sli->event_data1, acqe_sli->event_data2,
3870  bf_get(lpfc_trailer_type, acqe_sli));
3871  return;
3872  }
3873 
3874  port_name = phba->Port[0];
3875  if (port_name == 0x00)
3876  port_name = '?'; /* port name is empty */
3877 
3878  misconfigured = (struct lpfc_acqe_misconfigured_event *)
3879  &acqe_sli->event_data1;
3880 
3881  /* fetch the status for this port */
3882  switch (phba->sli4_hba.lnk_info.lnk_no) {
3883  case LPFC_LINK_NUMBER_0:
3884  status = bf_get(lpfc_sli_misconfigured_port0,
3885  &misconfigured->theEvent);
3886  break;
3887  case LPFC_LINK_NUMBER_1:
3888  status = bf_get(lpfc_sli_misconfigured_port1,
3889  &misconfigured->theEvent);
3890  break;
3891  case LPFC_LINK_NUMBER_2:
3892  status = bf_get(lpfc_sli_misconfigured_port2,
3893  &misconfigured->theEvent);
3894  break;
3895  case LPFC_LINK_NUMBER_3:
3896  status = bf_get(lpfc_sli_misconfigured_port3,
3897  &misconfigured->theEvent);
3898  break;
3899  default:
3900  status = ~LPFC_SLI_EVENT_STATUS_VALID;
3901  break;
3902  }
3903 
3904  switch (status) {
3906  return; /* no message if the sfp is okay */
3908  sprintf(message, "Optics faulted/incorrectly installed/not " \
3909  "installed - Reseat optics, if issue not "
3910  "resolved, replace.");
3911  break;
3913  sprintf(message,
3914  "Optics of two types installed - Remove one optic or " \
3915  "install matching pair of optics.");
3916  break;
3918  sprintf(message, "Incompatible optics - Replace with " \
3919  "compatible optics for card to function.");
3920  break;
3921  default:
3922  /* firmware is reporting a status we don't know about */
3923  sprintf(message, "Unknown event status x%02x", status);
3924  break;
3925  }
3926 
3928  "3176 Misconfigured Physical Port - "
3929  "Port Name %c %s\n", port_name, message);
3930 }
3931 
3942 static struct lpfc_nodelist *
3943 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3944 {
3945  struct lpfc_nodelist *ndlp;
3946  struct Scsi_Host *shost;
3947  struct lpfc_hba *phba;
3948 
3949  if (!vport)
3950  return NULL;
3951  phba = vport->phba;
3952  if (!phba)
3953  return NULL;
3954  ndlp = lpfc_findnode_did(vport, Fabric_DID);
3955  if (!ndlp) {
3956  /* Cannot find existing Fabric ndlp, so allocate a new one */
3957  ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3958  if (!ndlp)
3959  return NULL;
3960  lpfc_nlp_init(vport, ndlp, Fabric_DID);
3961  /* Set the node type */
3962  ndlp->nlp_type |= NLP_FABRIC;
3963  /* Put ndlp onto node list */
3964  lpfc_enqueue_node(vport, ndlp);
3965  } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3966  /* re-setup ndlp without removing from node list */
3967  ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3968  if (!ndlp)
3969  return NULL;
3970  }
3971  if ((phba->pport->port_state < LPFC_FLOGI) &&
3972  (phba->pport->port_state != LPFC_VPORT_FAILED))
3973  return NULL;
3974  /* If virtual link is not yet instantiated ignore CVL */
3975  if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3976  && (vport->port_state != LPFC_VPORT_FAILED))
3977  return NULL;
3978  shost = lpfc_shost_from_vport(vport);
3979  if (!shost)
3980  return NULL;
3981  lpfc_linkdown_port(vport);
3983  spin_lock_irq(shost->host_lock);
3984  vport->fc_flag |= FC_VPORT_CVL_RCVD;
3985  spin_unlock_irq(shost->host_lock);
3986 
3987  return ndlp;
3988 }
3989 
3997 static void
3998 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3999 {
4000  struct lpfc_vport **vports;
4001  int i;
4002 
4003  vports = lpfc_create_vport_work_array(phba);
4004  if (vports)
4005  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4006  lpfc_sli4_perform_vport_cvl(vports[i]);
4007  lpfc_destroy_vport_work_array(phba, vports);
4008 }
4009 
4017 static void
4018 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4019  struct lpfc_acqe_fip *acqe_fip)
4020 {
4021  uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4022  int rc;
4023  struct lpfc_vport *vport;
4024  struct lpfc_nodelist *ndlp;
4025  struct Scsi_Host *shost;
4026  int active_vlink_present;
4027  struct lpfc_vport **vports;
4028  int i;
4029 
4030  phba->fc_eventTag = acqe_fip->event_tag;
4031  phba->fcoe_eventtag = acqe_fip->event_tag;
4032  switch (event_type) {
4033  case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4034  case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4035  if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
4037  LOG_DISCOVERY,
4038  "2546 New FCF event, evt_tag:x%x, "
4039  "index:x%x\n",
4040  acqe_fip->event_tag,
4041  acqe_fip->index);
4042  else
4044  LOG_DISCOVERY,
4045  "2788 FCF param modified event, "
4046  "evt_tag:x%x, index:x%x\n",
4047  acqe_fip->event_tag,
4048  acqe_fip->index);
4049  if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4050  /*
4051  * During period of FCF discovery, read the FCF
4052  * table record indexed by the event to update
4053  * FCF roundrobin failover eligible FCF bmask.
4054  */
4056  LOG_DISCOVERY,
4057  "2779 Read FCF (x%x) for updating "
4058  "roundrobin FCF failover bmask\n",
4059  acqe_fip->index);
4060  rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
4061  }
4062 
4063  /* If the FCF discovery is in progress, do nothing. */
4064  spin_lock_irq(&phba->hbalock);
4065  if (phba->hba_flag & FCF_TS_INPROG) {
4066  spin_unlock_irq(&phba->hbalock);
4067  break;
4068  }
4069  /* If fast FCF failover rescan event is pending, do nothing */
4070  if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4071  spin_unlock_irq(&phba->hbalock);
4072  break;
4073  }
4074 
4075  /* If the FCF has been in discovered state, do nothing. */
4076  if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
4077  spin_unlock_irq(&phba->hbalock);
4078  break;
4079  }
4080  spin_unlock_irq(&phba->hbalock);
4081 
4082  /* Otherwise, scan the entire FCF table and re-discover SAN */
4084  "2770 Start FCF table scan per async FCF "
4085  "event, evt_tag:x%x, index:x%x\n",
4086  acqe_fip->event_tag, acqe_fip->index);
4087  rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4088  LPFC_FCOE_FCF_GET_FIRST);
4089  if (rc)
4091  "2547 Issue FCF scan read FCF mailbox "
4092  "command failed (x%x)\n", rc);
4093  break;
4094 
4097  "2548 FCF Table full count 0x%x tag 0x%x\n",
4098  bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4099  acqe_fip->event_tag);
4100  break;
4101 
4102  case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
4103  phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4105  "2549 FCF (x%x) disconnected from network, "
4106  "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
4107  /*
4108  * If we are in the middle of FCF failover process, clear
4109  * the corresponding FCF bit in the roundrobin bitmap.
4110  */
4111  spin_lock_irq(&phba->hbalock);
4112  if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4113  spin_unlock_irq(&phba->hbalock);
4114  /* Update FLOGI FCF failover eligible FCF bmask */
4115  lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4116  break;
4117  }
4118  spin_unlock_irq(&phba->hbalock);
4119 
4120  /* If the event is not for currently used fcf do nothing */
4121  if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4122  break;
4123 
4124  /*
4125  * Otherwise, request the port to rediscover the entire FCF
4126  * table for a fast recovery from case that the current FCF
4127  * is no longer valid as we are not in the middle of FCF
4128  * failover process already.
4129  */
4130  spin_lock_irq(&phba->hbalock);
4131  /* Mark the fast failover process in progress */
4132  phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4133  spin_unlock_irq(&phba->hbalock);
4134 
4136  "2771 Start FCF fast failover process due to "
4137  "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4138  "\n", acqe_fip->event_tag, acqe_fip->index);
4139  rc = lpfc_sli4_redisc_fcf_table(phba);
4140  if (rc) {
4142  LOG_DISCOVERY,
4143  "2772 Issue FCF rediscover mailbox "
4144  "command failed, fail through to FCF "
4145  "dead event\n");
4146  spin_lock_irq(&phba->hbalock);
4147  phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4148  spin_unlock_irq(&phba->hbalock);
4149  /*
4150  * Last resort will fail over by treating this
4151  * as a link down to FCF registration.
4152  */
4154  } else {
4155  /* Reset FCF roundrobin bmask for new discovery */
4157  /*
4158  * Handling fast FCF failover to a DEAD FCF event is
4159  * considered equivalent to receiving CVL to all vports.
4160  */
4161  lpfc_sli4_perform_all_vport_cvl(phba);
4162  }
4163  break;
4164  case LPFC_FIP_EVENT_TYPE_CVL:
4165  phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4167  "2718 Clear Virtual Link Received for VPI 0x%x"
4168  " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4169 
4170  vport = lpfc_find_vport_by_vpid(phba,
4171  acqe_fip->index);
4172  ndlp = lpfc_sli4_perform_vport_cvl(vport);
4173  if (!ndlp)
4174  break;
4175  active_vlink_present = 0;
4176 
4177  vports = lpfc_create_vport_work_array(phba);
4178  if (vports) {
4179  for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4180  i++) {
4181  if ((!(vports[i]->fc_flag &
4182  FC_VPORT_CVL_RCVD)) &&
4183  (vports[i]->port_state > LPFC_FDISC)) {
4184  active_vlink_present = 1;
4185  break;
4186  }
4187  }
4188  lpfc_destroy_vport_work_array(phba, vports);
4189  }
4190 
4191  if (active_vlink_present) {
4192  /*
4193  * If there are other active VLinks present,
4194  * re-instantiate the Vlink using FDISC.
4195  */
4196  mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
4197  shost = lpfc_shost_from_vport(vport);
4198  spin_lock_irq(shost->host_lock);
4199  ndlp->nlp_flag |= NLP_DELAY_TMO;
4200  spin_unlock_irq(shost->host_lock);
4202  vport->port_state = LPFC_FDISC;
4203  } else {
4204  /*
4205  * Otherwise, we request port to rediscover
4206  * the entire FCF table for a fast recovery
4207  * from possible case that the current FCF
4208  * is no longer valid if we are not already
4209  * in the FCF failover process.
4210  */
4211  spin_lock_irq(&phba->hbalock);
4212  if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4213  spin_unlock_irq(&phba->hbalock);
4214  break;
4215  }
4216  /* Mark the fast failover process in progress */
4217  phba->fcf.fcf_flag |= FCF_ACVL_DISC;
4218  spin_unlock_irq(&phba->hbalock);
4220  LOG_DISCOVERY,
4221  "2773 Start FCF failover per CVL, "
4222  "evt_tag:x%x\n", acqe_fip->event_tag);
4223  rc = lpfc_sli4_redisc_fcf_table(phba);
4224  if (rc) {
4226  LOG_DISCOVERY,
4227  "2774 Issue FCF rediscover "
4228  "mailbox command failed, fail "
4229  "through to CVL event\n");
4230  spin_lock_irq(&phba->hbalock);
4231  phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
4232  spin_unlock_irq(&phba->hbalock);
4233  /*
4234  * Last resort will be re-try on the
4235  * current registered FCF entry.
4236  */
4238  } else
4239  /*
4240  * Reset FCF roundrobin bmask for new
4241  * discovery.
4242  */
4244  }
4245  break;
4246  default:
4248  "0288 Unknown FCoE event type 0x%x event tag "
4249  "0x%x\n", event_type, acqe_fip->event_tag);
4250  break;
4251  }
4252 }
4253 
4261 static void
4262 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4263  struct lpfc_acqe_dcbx *acqe_dcbx)
4264 {
4265  phba->fc_eventTag = acqe_dcbx->event_tag;
4267  "0290 The SLI4 DCBX asynchronous event is not "
4268  "handled yet\n");
4269 }
4270 
4280 static void
4281 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4282  struct lpfc_acqe_grp5 *acqe_grp5)
4283 {
4284  uint16_t prev_ll_spd;
4285 
4286  phba->fc_eventTag = acqe_grp5->event_tag;
4287  phba->fcoe_eventtag = acqe_grp5->event_tag;
4288  prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4289  phba->sli4_hba.link_state.logical_speed =
4290  (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4292  "2789 GRP5 Async Event: Updating logical link speed "
4293  "from %dMbps to %dMbps\n", prev_ll_spd,
4294  phba->sli4_hba.link_state.logical_speed);
4295 }
4296 
4303  void
4304  lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4305  {
4306  struct lpfc_cq_event *cq_event;
4307 
4308  /* First, declare the async event has been handled */
4309  spin_lock_irq(&phba->hbalock);
4310  phba->hba_flag &= ~ASYNC_EVENT;
4311  spin_unlock_irq(&phba->hbalock);
4312  /* Now, handle all the async events */
4313  while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4314  /* Get the first event from the head of the event queue */
4315  spin_lock_irq(&phba->hbalock);
4316  list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4317  cq_event, struct lpfc_cq_event, list);
4318  spin_unlock_irq(&phba->hbalock);
4319  /* Process the asynchronous event */
4320  switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4322  lpfc_sli4_async_link_evt(phba,
4323  &cq_event->cqe.acqe_link);
4324  break;
4326  lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4327  break;
4329  lpfc_sli4_async_dcbx_evt(phba,
4330  &cq_event->cqe.acqe_dcbx);
4331  break;
4333  lpfc_sli4_async_grp5_evt(phba,
4334  &cq_event->cqe.acqe_grp5);
4335  break;
4336  case LPFC_TRAILER_CODE_FC:
4337  lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4338  break;
4339  case LPFC_TRAILER_CODE_SLI:
4340  lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4341  break;
4342  default:
4344  "1804 Invalid asynchronous event code: "
4345  "x%x\n", bf_get(lpfc_trailer_code,
4346  &cq_event->cqe.mcqe_cmpl));
4347  break;
4348  }
4349  /* Free the completion event processed to the free pool */
4350  lpfc_sli4_cq_event_release(phba, cq_event);
4351  }
4352 }
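/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] The loop above
 * drains a work queue in the usual lock/dequeue/unlock shape: the head is
 * removed under hbalock, but the potentially slow event handling runs with
 * the lock dropped. Stripped to its skeleton (handle_event() is a
 * hypothetical stand-in for the switch statement):
 */
#if 0	/* example only; never compiled into the driver */
struct lpfc_cq_event *cq_event;

while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
	spin_lock_irq(&phba->hbalock);
	list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
			 cq_event, struct lpfc_cq_event, list);
	spin_unlock_irq(&phba->hbalock);
	handle_event(cq_event);			/* dispatch outside the lock */
	lpfc_sli4_cq_event_release(phba, cq_event);
}
#endif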
4353 
4360  void
4361  lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4362  {
4363  int rc;
4364 
4365  spin_lock_irq(&phba->hbalock);
4366  /* Clear FCF rediscovery timeout event */
4367  phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4368  /* Clear driver fast failover FCF record flag */
4369  phba->fcf.failover_rec.flag = 0;
4370  /* Set state for FCF fast failover */
4371  phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4372  spin_unlock_irq(&phba->hbalock);
4373 
4374  /* Scan FCF table from the first entry to re-discover SAN */
4376  "2777 Start post-quiescent FCF table scan\n");
4377  rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4378  if (rc)
4380  "2747 Issue FCF scan read FCF mailbox "
4381  "command failed 0x%x\n", rc);
4382 }
4383 
4394 int
4395 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4396 {
4397  int rc;
4398 
4399  /* Set up lpfc PCI-device group */
4400  phba->pci_dev_grp = dev_grp;
4401 
4402  /* The LPFC_PCI_DEV_OC uses SLI4 */
4403  if (dev_grp == LPFC_PCI_DEV_OC)
4404  phba->sli_rev = LPFC_SLI_REV4;
4405 
4406  /* Set up device INIT API function jump table */
4407  rc = lpfc_init_api_table_setup(phba, dev_grp);
4408  if (rc)
4409  return -ENODEV;
4410  /* Set up SCSI API function jump table */
4411  rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4412  if (rc)
4413  return -ENODEV;
4414  /* Set up SLI API function jump table */
4415  rc = lpfc_sli_api_table_setup(phba, dev_grp);
4416  if (rc)
4417  return -ENODEV;
4418  /* Set up MBOX API function jump table */
4419  rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4420  if (rc)
4421  return -ENODEV;
4422 
4423  return 0;
4424 }
4425 
4434 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4435 {
4436  switch (intr_mode) {
4437  case 0:
4439  "0470 Enable INTx interrupt mode.\n");
4440  break;
4441  case 1:
4443  "0481 Enabled MSI interrupt mode.\n");
4444  break;
4445  case 2:
4447  "0480 Enabled MSI-X interrupt mode.\n");
4448  break;
4449  default:
4451  "0482 Illegal interrupt mode.\n");
4452  break;
4453  }
4454  return;
4455 }
4456 
4468 static int
4469 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4470 {
4471  struct pci_dev *pdev;
4472  int bars = 0;
4473 
4474  /* Obtain PCI device reference */
4475  if (!phba->pcidev)
4476  goto out_error;
4477  else
4478  pdev = phba->pcidev;
4479  /* Select PCI BARs */
4480  bars = pci_select_bars(pdev, IORESOURCE_MEM);
4481  /* Enable PCI device */
4482  if (pci_enable_device_mem(pdev))
4483  goto out_error;
4484  /* Request PCI resource for the device */
4485  if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4486  goto out_disable_device;
4487  /* Set up device as PCI master and save state for EEH */
4488  pci_set_master(pdev);
4489  pci_try_set_mwi(pdev);
4490  pci_save_state(pdev);
4491 
4492  /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4494  pdev->needs_freset = 1;
4495 
4496  return 0;
4497 
4498 out_disable_device:
4499  pci_disable_device(pdev);
4500 out_error:
4502  "1401 Failed to enable pci device, bars:x%x\n", bars);
4503  return -ENODEV;
4504 }
4505 
4513 static void
4514 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4515 {
4516  struct pci_dev *pdev;
4517  int bars;
4518 
4519  /* Obtain PCI device reference */
4520  if (!phba->pcidev)
4521  return;
4522  else
4523  pdev = phba->pcidev;
4524  /* Select PCI BARs */
4525  bars = pci_select_bars(pdev, IORESOURCE_MEM);
4526  /* Release PCI resource and disable PCI device */
4527  pci_release_selected_regions(pdev, bars);
4528  pci_disable_device(pdev);
4529  /* Null out PCI private reference to driver */
4530  pci_set_drvdata(pdev, NULL);
4531 
4532  return;
4533 }
4534 
4544 void
4545  lpfc_reset_hba(struct lpfc_hba *phba)
4546  {
4547  /* If resets are disabled then set error state and return. */
4548  if (!phba->cfg_enable_hba_reset) {
4549  phba->link_state = LPFC_HBA_ERROR;
4550  return;
4551  }
4553  lpfc_offline(phba);
4554  lpfc_sli_brdrestart(phba);
4555  lpfc_online(phba);
4556  lpfc_unblock_mgmt_io(phba);
4557 }
4558 
4569 uint16_t
4570  lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4571  {
4572  struct pci_dev *pdev = phba->pcidev;
4573  uint16_t nr_virtfn;
4574  int pos;
4575 
4576  pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4577  if (pos == 0)
4578  return 0;
4579 
4580  pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4581  return nr_virtfn;
4582 }
4583 
4595 int
4596 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4597 {
4598  struct pci_dev *pdev = phba->pcidev;
4599  uint16_t max_nr_vfn;
4600  int rc;
4601 
4602  max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4603  if (nr_vfn > max_nr_vfn) {
4605  "3057 Requested vfs (%d) greater than "
4606  "supported vfs (%d)", nr_vfn, max_nr_vfn);
4607  return -EINVAL;
4608  }
4609 
4610  rc = pci_enable_sriov(pdev, nr_vfn);
4611  if (rc) {
4613  "2806 Failed to enable sriov on this device "
4614  "with vfn number nr_vf:%d, rc:%d\n",
4615  nr_vfn, rc);
4616  } else
4618  "2807 Successful enable sriov on this device "
4619  "with vfn number nr_vf:%d\n", nr_vfn);
4620  return rc;
4621 }
4622 
4634 static int
4635 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4636 {
4637  struct lpfc_sli *psli;
4638  int rc;
4639 
4640  /*
4641  * Initialize timers used by driver
4642  */
4643 
4644  /* Heartbeat timer */
4645  init_timer(&phba->hb_tmofunc);
4646  phba->hb_tmofunc.function = lpfc_hb_timeout;
4647  phba->hb_tmofunc.data = (unsigned long)phba;
4648 
4649  psli = &phba->sli;
4650  /* MBOX heartbeat timer */
4651  init_timer(&psli->mbox_tmo);
4652  psli->mbox_tmo.function = lpfc_mbox_timeout;
4653  psli->mbox_tmo.data = (unsigned long) phba;
4654  /* FCP polling mode timer */
4655  init_timer(&phba->fcp_poll_timer);
4656  phba->fcp_poll_timer.function = lpfc_poll_timeout;
4657  phba->fcp_poll_timer.data = (unsigned long) phba;
4658  /* Fabric block timer */
4659  init_timer(&phba->fabric_block_timer);
4660  phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4661  phba->fabric_block_timer.data = (unsigned long) phba;
4662  /* EA polling mode timer */
4663  init_timer(&phba->eratt_poll);
4664  phba->eratt_poll.function = lpfc_poll_eratt;
4665  phba->eratt_poll.data = (unsigned long) phba;
4666 
4667  /* Host attention work mask setup */
4668  phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4669  phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4670 
4671  /* Get all the module params for configuring this host */
4672  lpfc_get_cfgparam(phba);
4673  if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4674  phba->menlo_flag |= HBA_MENLO_SUPPORT;
4675  /* check for menlo minimum sg count */
4678  }
4679 
4680  if (!phba->sli.ring)
4681  phba->sli.ring = (struct lpfc_sli_ring *)
4682  kzalloc(LPFC_SLI3_MAX_RING *
4683  sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4684  if (!phba->sli.ring)
4685  return -ENOMEM;
4686 
4687  /*
4688  * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4689  * used to create the sg_dma_buf_pool must be dynamically calculated.
4690  * 2 segments are added since the IOCB needs a command and response bde.
4691  */
4692  phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4693  sizeof(struct fcp_rsp) +
4694  ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4695 
4696  if (phba->cfg_enable_bg) {
4698  phba->cfg_sg_dma_buf_size +=
4699  phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4700  }
4701 
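/*
 * [Editor's illustrative sketch - not part of lpfc_init.c] A worked example
 * of the DMA buffer sizing above: each SLI-3 BDE (struct ulp_bde64) is 12
 * bytes, and two extra BDEs are reserved for the command and response
 * payloads. Assuming a hypothetical 64-segment configuration:
 */
#if 0	/* example only; never compiled into the driver */
/*
 *   sg_dma_buf_size = sizeof(struct fcp_cmnd)        FCP command frame
 *                   + sizeof(struct fcp_rsp)         FCP response frame
 *                   + (64 + 2) * 12                  66 BDEs = 792 bytes
 */
#endif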
4702  /* Also reinitialize the host templates with new values. */
4703  lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4704  lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4705 
4706  phba->max_vpi = LPFC_MAX_VPI;
4707  /* This will be set to correct value after config_port mbox */
4708  phba->max_vports = 0;
4709 
4710  /*
4711  * Initialize the SLI Layer to run with lpfc HBAs.
4712  */
4713  lpfc_sli_setup(phba);
4714  lpfc_sli_queue_setup(phba);
4715 
4716  /* Allocate device driver memory */
4717  if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4718  return -ENOMEM;
4719 
4720  /*
4721  * Enable sr-iov virtual functions if supported and configured
4722  * through the module parameter.
4723  */
4724  if (phba->cfg_sriov_nr_virtfn > 0) {
4725  rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4726  phba->cfg_sriov_nr_virtfn);
4727  if (rc) {
4729  "2808 Requested number of SR-IOV "
4730  "virtual functions (%d) is not "
4731  "supported\n",
4732  phba->cfg_sriov_nr_virtfn);
4733  phba->cfg_sriov_nr_virtfn = 0;
4734  }
4735  }
4736 
4737  return 0;
4738 }
4739 
4747 static void
4748 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4749 {
4750  /* Free device driver memory allocated */
4751  lpfc_mem_free_all(phba);
4752 
4753  return;
4754 }
4755 
4767 static int
4768 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4769 {
4770  struct lpfc_sli *psli;
4772  int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4773  uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4774  struct lpfc_mqe *mqe;
4775  int longs, sli_family;
4776  int sges_per_segment;
4777 
4778  /* Before proceed, wait for POST done and device ready */
4779  rc = lpfc_sli4_post_status_check(phba);
4780  if (rc)
4781  return -ENODEV;
4782 
4783  /*
4784  * Initialize timers used by driver
4785  */
4786 
4787  /* Heartbeat timer */
4788  init_timer(&phba->hb_tmofunc);
4789  phba->hb_tmofunc.function = lpfc_hb_timeout;
4790  phba->hb_tmofunc.data = (unsigned long)phba;
4791  init_timer(&phba->rrq_tmr);
4792  phba->rrq_tmr.function = lpfc_rrq_timeout;
4793  phba->rrq_tmr.data = (unsigned long)phba;
4794 
4795  psli = &phba->sli;
4796  /* MBOX heartbeat timer */
4797  init_timer(&psli->mbox_tmo);
4798  psli->mbox_tmo.function = lpfc_mbox_timeout;
4799  psli->mbox_tmo.data = (unsigned long) phba;
4800  /* Fabric block timer */
4801  init_timer(&phba->fabric_block_timer);
4802  phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4803  phba->fabric_block_timer.data = (unsigned long) phba;
4804  /* EA polling mode timer */
4805  init_timer(&phba->eratt_poll);
4806  phba->eratt_poll.function = lpfc_poll_eratt;
4807  phba->eratt_poll.data = (unsigned long) phba;
4808  /* FCF rediscover timer */
4809  init_timer(&phba->fcf.redisc_wait);
4810  phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4811  phba->fcf.redisc_wait.data = (unsigned long)phba;
4812 
4813  /*
4814  * Control structure for handling external multi-buffer mailbox
4815  * command pass-through.
4816  */
4817  memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4818  sizeof(struct lpfc_mbox_ext_buf_ctx));
4819  INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4820 
4821  /*
4822  * We need to do a READ_CONFIG mailbox command here before
4823  * calling lpfc_get_cfgparam. For VFs this will report the
4824  * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4825  * All of the resources allocated
4826  * for this Port are tied to these values.
4827  */
4828  /* Get all the module params for configuring this host */
4829  lpfc_get_cfgparam(phba);
4830  phba->max_vpi = LPFC_MAX_VPI;
4831 
4832  /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
4833  phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4834 
4835  /* This will be set to the correct value after the read_config mbox */
4836  phba->max_vports = 0;
4837 
4838  /* Program the default value of vlan_id and fc_map */
4839  phba->valid_vlan = 0;
4840  phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4841  phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4842  phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4843 
4844  /* With BlockGuard we can have multiple SGEs per Data Segment */
4845  sges_per_segment = 1;
4846  if (phba->cfg_enable_bg)
4847  sges_per_segment = 2;
4848 
4849  /*
4850  * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4851  * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
4852  */
4853  if (!phba->sli.ring)
4854  phba->sli.ring = kzalloc(
4855  (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4856  sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4857  if (!phba->sli.ring)
4858  return -ENOMEM;
4859  /*
4860  * Since sg_tablesize is a module parameter, the sg_dma_buf_size
4861  * used to create the sg_dma_buf_pool must be dynamically calculated.
4862  * 2 segments are added since the IOCB needs a command and response bde.
4863  * To ensure that the scsi sgl does not cross a 4k page boundary, only
4864  * sgl sizes that are a power of 2 are used.
4865  */
4866  buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4867  (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
4868  sizeof(struct sli4_sge)));
4869 
4870  sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4871  max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4872  switch (sli_family) {
4873  case LPFC_SLI_INTF_FAMILY_BE2:
4874  case LPFC_SLI_INTF_FAMILY_BE3:
4875  /* There is a single hint for BE - 2 pages per BPL. */
4876  if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4877  LPFC_SLI_INTF_SLI_HINT1_1)
4878  max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4879  break;
4880  case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4881  case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4882  default:
4883  break;
4884  }
4885 
4886  for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4887  dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4888  dma_buf_size = dma_buf_size << 1)
4889  ;
4890  if (dma_buf_size == max_buf_size)
4891  phba->cfg_sg_seg_cnt = (dma_buf_size -
4892  sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4893  (2 * sizeof(struct sli4_sge))) /
4894  sizeof(struct sli4_sge);
4895  phba->cfg_sg_dma_buf_size = dma_buf_size;
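 /*
  * [Editorial sketch] With illustrative stand-in sizes (fcp_cmnd = 32,
  * fcp_rsp = 96, sli4_sge = 16, sges_per_segment = 1) and
  * cfg_sg_seg_cnt = 64:
  *
  *   buf_size = 32 + 96 + (64 * 1 + 2) * 16 = 1184 bytes
  *
  * Assuming a 1024-byte LPFC_SLI4_MIN_BUF_SIZE, the loop above doubles
  * once and dma_buf_size lands on 2048, the next power of two, so a
  * pool buffer never straddles a 4K page. Only when the loop hits the
  * family maximum is cfg_sg_seg_cnt recomputed downward to fit.
  */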
4896 
4897  /* Initialize buffer queue management fields */
4898  hbq_count = lpfc_sli_hbq_count();
4899  for (i = 0; i < hbq_count; ++i)
4900  INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4901  INIT_LIST_HEAD(&phba->rb_pend_list);
4902  phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4903  phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4904 
4905  /*
4906  * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4907  */
4908  /* Initialize the Abort scsi buffer list used by driver */
4909  spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4910  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4912  /* This abort list is used by the worker thread */
4912  spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4913 
4914  /*
4915  * Initialize driver internal slow-path work queues
4916  */
4917 
4918  /* Driver internal slow-path CQ Event pool */
4919  INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4920  /* Response IOCB work queue list */
4921  INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4922  /* Asynchronous event CQ Event work queue list */
4923  INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4924  /* Fast-path XRI aborted CQ Event work queue list */
4925  INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4926  /* Slow-path XRI aborted CQ Event work queue list */
4927  INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4928  /* Receive queue CQ Event work queue list */
4929  INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4930 
4931  /* Initialize extent block lists. */
4932  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4933  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4934  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4935  INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4936 
4937  /* Initialize the driver internal SLI layer lists. */
4938  lpfc_sli_setup(phba);
4939  lpfc_sli_queue_setup(phba);
4940 
4941  /* Allocate device driver memory */
4942  rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4943  if (rc)
4944  return -ENOMEM;
4945 
4946  /* IF Type 2 ports get initialized now. */
4947  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4948  LPFC_SLI_INTF_IF_TYPE_2) {
4949  rc = lpfc_pci_function_reset(phba);
4950  if (unlikely(rc))
4951  return -ENODEV;
4952  }
4953 
4954  /* Create the bootstrap mailbox command */
4955  rc = lpfc_create_bootstrap_mbox(phba);
4956  if (unlikely(rc))
4957  goto out_free_mem;
4958 
4959  /* Set up the host's endian order with the device. */
4960  rc = lpfc_setup_endian_order(phba);
4961  if (unlikely(rc))
4962  goto out_free_bsmbx;
4963 
4964  /* Set up the hba's configuration parameters. */
4965  rc = lpfc_sli4_read_config(phba);
4966  if (unlikely(rc))
4967  goto out_free_bsmbx;
4968 
4969  /* IF Type 0 ports get initialized now. */
4970  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4971  LPFC_SLI_INTF_IF_TYPE_0) {
4972  rc = lpfc_pci_function_reset(phba);
4973  if (unlikely(rc))
4974  goto out_free_bsmbx;
4975  }
4976 
4977  mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4978  GFP_KERNEL);
4979  if (!mboxq) {
4980  rc = -ENOMEM;
4981  goto out_free_bsmbx;
4982  }
4983 
4984  /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4985  lpfc_supported_pages(mboxq);
4986  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4987  if (!rc) {
4988  mqe = &mboxq->u.mqe;
4989  memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4990  LPFC_MAX_SUPPORTED_PAGES);
4991  for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4992  switch (pn_page[i]) {
4993  case LPFC_SLI4_PARAMETERS:
4994  phba->sli4_hba.pc_sli4_params.supported = 1;
4995  break;
4996  default:
4997  break;
4998  }
4999  }
5000  /* Read the port's SLI4 Parameters capabilities if supported. */
5001  if (phba->sli4_hba.pc_sli4_params.supported)
5002  rc = lpfc_pc_sli4_params_get(phba, mboxq);
5003  if (rc) {
5004  mempool_free(mboxq, phba->mbox_mem_pool);
5005  rc = -EIO;
5006  goto out_free_bsmbx;
5007  }
5008  }
5009  /*
5010  * Get sli4 parameters that override parameters from Port capabilities.
5011  * If this call fails, it isn't critical unless the SLI4 parameters come
5012  * back in conflict.
5013  */
5014  rc = lpfc_get_sli4_parameters(phba, mboxq);
5015  if (rc) {
5016  if (phba->sli4_hba.extents_in_use &&
5017  phba->sli4_hba.rpi_hdrs_in_use) {
5019  "2999 Unsupported SLI4 Parameters "
5020  "Extents and RPI headers enabled.\n");
5021  goto out_free_bsmbx;
5022  }
5023  }
5024  mempool_free(mboxq, phba->mbox_mem_pool);
5025  /* Verify all the SLI4 queues */
5026  rc = lpfc_sli4_queue_verify(phba);
5027  if (rc)
5028  goto out_free_bsmbx;
5029 
5030  /* Create driver internal CQE event pool */
5031  rc = lpfc_sli4_cq_event_pool_create(phba);
5032  if (rc)
5033  goto out_free_bsmbx;
5034 
5035  /* Initialize sgl lists per host */
5036  lpfc_init_sgl_list(phba);
5037 
5038  /* Allocate and initialize active sgl array */
5039  rc = lpfc_init_active_sgl_array(phba);
5040  if (rc) {
5042  "1430 Failed to initialize sgl list.\n");
5043  goto out_destroy_cq_event_pool;
5044  }
5045  rc = lpfc_sli4_init_rpi_hdrs(phba);
5046  if (rc) {
5048  "1432 Failed to initialize rpi headers.\n");
5049  goto out_free_active_sgl;
5050  }
5051 
5052  /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
5053  longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5054  phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5055  GFP_KERNEL);
5056  if (!phba->fcf.fcf_rr_bmask) {
5058  "2759 Failed allocate memory for FCF round "
5059  "robin failover bmask\n");
5060  rc = -ENOMEM;
5061  goto out_remove_rpi_hdrs;
5062  }
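 /*
  * [Editorial sketch] The "longs" computation above is the usual
  * bits-to-longs round-up. For a hypothetical 1024-entry FCF table on
  * a 64-bit kernel:
  *
  *   longs = (1024 + 64 - 1) / 64 = 16 unsigned longs = 128 bytes
  *
  * which kzalloc then zeroes, so every FCF index starts out cleared.
  */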
5063 
5064  phba->sli4_hba.fcp_eq_hdl =
5065  kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5066  phba->cfg_fcp_io_channel), GFP_KERNEL);
5067  if (!phba->sli4_hba.fcp_eq_hdl) {
5069  "2572 Failed allocate memory for "
5070  "fast-path per-EQ handle array\n");
5071  rc = -ENOMEM;
5072  goto out_free_fcf_rr_bmask;
5073  }
5074 
5075  phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5076  phba->cfg_fcp_io_channel), GFP_KERNEL);
5077  if (!phba->sli4_hba.msix_entries) {
5079  "2573 Failed allocate memory for msi-x "
5080  "interrupt vector entries\n");
5081  rc = -ENOMEM;
5082  goto out_free_fcp_eq_hdl;
5083  }
5084 
5085  /*
5086  * Enable sr-iov virtual functions if supported and configured
5087  * through the module parameter.
5088  */
5089  if (phba->cfg_sriov_nr_virtfn > 0) {
5090  rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5091  phba->cfg_sriov_nr_virtfn);
5092  if (rc) {
5094  "3020 Requested number of SR-IOV "
5095  "virtual functions (%d) is not "
5096  "supported\n",
5097  phba->cfg_sriov_nr_virtfn);
5098  phba->cfg_sriov_nr_virtfn = 0;
5099  }
5100  }
5101 
5102  return 0;
5103 
5104 out_free_fcp_eq_hdl:
5105  kfree(phba->sli4_hba.fcp_eq_hdl);
5106 out_free_fcf_rr_bmask:
5107  kfree(phba->fcf.fcf_rr_bmask);
5108 out_remove_rpi_hdrs:
5109  lpfc_sli4_remove_rpi_hdrs(phba);
5110 out_free_active_sgl:
5111  lpfc_free_active_sgl(phba);
5112 out_destroy_cq_event_pool:
5113  lpfc_sli4_cq_event_pool_destroy(phba);
5114 out_free_bsmbx:
5115  lpfc_destroy_bootstrap_mbox(phba);
5116 out_free_mem:
5117  lpfc_mem_free(phba);
5118  return rc;
5119 }
5120 
5128 static void
5129 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5130 {
5131  struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5132 
5133  /* Free memory allocated for msi-x interrupt vector entries */
5134  kfree(phba->sli4_hba.msix_entries);
5135 
5136  /* Free memory allocated for fast-path work queue handles */
5137  kfree(phba->sli4_hba.fcp_eq_hdl);
5138 
5139  /* Free the allocated rpi headers. */
5140  lpfc_sli4_remove_rpi_hdrs(phba);
5141  lpfc_sli4_remove_rpis(phba);
5142 
5143  /* Free eligible FCF index bmask */
5144  kfree(phba->fcf.fcf_rr_bmask);
5145 
5146  /* Free the ELS sgl list */
5147  lpfc_free_active_sgl(phba);
5148  lpfc_free_els_sgl_list(phba);
5149 
5150  /* Free the completion queue EQ event pool */
5151  lpfc_sli4_cq_event_release_all(phba);
5152  lpfc_sli4_cq_event_pool_destroy(phba);
5153 
5154  /* Release resource identifiers. */
5155  lpfc_sli4_dealloc_resource_identifiers(phba);
5156 
5157  /* Free the bsmbx region. */
5158  lpfc_destroy_bootstrap_mbox(phba);
5159 
5160  /* Free the SLI Layer memory with SLI4 HBAs */
5161  lpfc_mem_free_all(phba);
5162 
5163  /* Free the current connect table */
5164  list_for_each_entry_safe(conn_entry, next_conn_entry,
5165  &phba->fcf_conn_rec_list, list) {
5166  list_del_init(&conn_entry->list);
5167  kfree(conn_entry);
5168  }
5169 
5170  return;
5171 }
5172 
5183 int
5184 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5185 {
5186  phba->lpfc_hba_init_link = lpfc_hba_init_link;
5187  phba->lpfc_hba_down_link = lpfc_hba_down_link;
5188  phba->lpfc_selective_reset = lpfc_selective_reset;
5189  switch (dev_grp) {
5190  case LPFC_PCI_DEV_LP:
5191  phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5192  phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5193  phba->lpfc_stop_port = lpfc_stop_port_s3;
5194  break;
5195  case LPFC_PCI_DEV_OC:
5196  phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5197  phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5198  phba->lpfc_stop_port = lpfc_stop_port_s4;
5199  break;
5200  default:
5202  "1431 Invalid HBA PCI-device group: 0x%x\n",
5203  dev_grp);
5204  return -ENODEV;
5205  break;
5206  }
5207  return 0;
5208 }
5209 
5221 static int
5222 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5223 {
5224  /*
5225  * Driver resources common to all SLI revisions
5226  */
5227  atomic_set(&phba->fast_event_count, 0);
5228  spin_lock_init(&phba->hbalock);
5229 
5230  /* Initialize ndlp management spinlock */
5231  spin_lock_init(&phba->ndlp_lock);
5232 
5233  INIT_LIST_HEAD(&phba->port_list);
5234  INIT_LIST_HEAD(&phba->work_list);
5235  init_waitqueue_head(&phba->wait_4_mlo_m_q);
5236 
5237  /* Initialize the wait queue head for the kernel thread */
5238  init_waitqueue_head(&phba->work_waitq);
5239 
5240  /* Initialize the scsi buffer list used by driver for scsi IO */
5241  spin_lock_init(&phba->scsi_buf_list_lock);
5242  INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
5243 
5244  /* Initialize the fabric iocb list */
5245  INIT_LIST_HEAD(&phba->fabric_iocb_list);
5246 
5247  /* Initialize list to save ELS buffers */
5248  INIT_LIST_HEAD(&phba->elsbuf);
5249 
5250  /* Initialize FCF connection rec list */
5251  INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5252 
5253  return 0;
5254 }
5255 
5267 static int
5268 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5269 {
5270  int error;
5271 
5272  /* Startup the kernel thread for this host adapter. */
5273  phba->worker_thread = kthread_run(lpfc_do_work, phba,
5274  "lpfc_worker_%d", phba->brd_no);
5275  if (IS_ERR(phba->worker_thread)) {
5276  error = PTR_ERR(phba->worker_thread);
5277  return error;
5278  }
5279 
5280  return 0;
5281 }
5282 
5291 static void
5292 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5293 {
5294  /* Stop kernel worker thread */
5295  kthread_stop(phba->worker_thread);
5296 }
5297 
5304 static void
5305 lpfc_free_iocb_list(struct lpfc_hba *phba)
5306 {
5307  struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5308 
5309  spin_lock_irq(&phba->hbalock);
5310  list_for_each_entry_safe(iocbq_entry, iocbq_next,
5311  &phba->lpfc_iocb_list, list) {
5312  list_del(&iocbq_entry->list);
5313  kfree(iocbq_entry);
5314  phba->total_iocbq_bufs--;
5315  }
5316  spin_unlock_irq(&phba->hbalock);
5317 
5318  return;
5319 }
5320 
5332 static int
5333 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5334 {
5335  struct lpfc_iocbq *iocbq_entry = NULL;
5336  uint16_t iotag;
5337  int i;
5338 
5339  /* Initialize and populate the iocb list per host. */
5340  INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5341  for (i = 0; i < iocb_count; i++) {
5342  iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5343  if (iocbq_entry == NULL) {
5344  printk(KERN_ERR "%s: only allocated %d iocbs of "
5345  "expected %d count. Unloading driver.\n",
5346  __func__, i, LPFC_IOCB_LIST_CNT);
5347  goto out_free_iocbq;
5348  }
5349 
5350  iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5351  if (iotag == 0) {
5352  kfree(iocbq_entry);
5353  printk(KERN_ERR "%s: failed to allocate IOTAG. "
5354  "Unloading driver.\n", __func__);
5355  goto out_free_iocbq;
5356  }
5357  iocbq_entry->sli4_lxritag = NO_XRI;
5358  iocbq_entry->sli4_xritag = NO_XRI;
5359 
5360  spin_lock_irq(&phba->hbalock);
5361  list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5362  phba->total_iocbq_bufs++;
5363  spin_unlock_irq(&phba->hbalock);
5364  }
5365 
5366  return 0;
5367 
5368 out_free_iocbq:
5369  lpfc_free_iocb_list(phba);
5370 
5371  return -ENOMEM;
5372 }
5373 
5381 void
5382 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5383 {
5384  struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5385 
5386  list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5387  list_del(&sglq_entry->list);
5388  lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5389  kfree(sglq_entry);
5390  }
5391 }
5392 
5399 static void
5400 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5401 {
5402  LIST_HEAD(sglq_list);
5403 
5404  /* Retrieve all els sgls from driver list */
5405  spin_lock_irq(&phba->hbalock);
5406  list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5407  spin_unlock_irq(&phba->hbalock);
5408 
5409  /* Now free the sgl list */
5410  lpfc_free_sgl_list(phba, &sglq_list);
5411 }
5412 
5420 static int
5421 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5422 {
5423  int size;
5424  size = sizeof(struct lpfc_sglq *);
5425  size *= phba->sli4_hba.max_cfg_param.max_xri;
5426 
5427  phba->sli4_hba.lpfc_sglq_active_list =
5428  kzalloc(size, GFP_KERNEL);
5429  if (!phba->sli4_hba.lpfc_sglq_active_list)
5430  return -ENOMEM;
5431  return 0;
5432 }
5433 
5442 static void
5443 lpfc_free_active_sgl(struct lpfc_hba *phba)
5444 {
5445  kfree(phba->sli4_hba.lpfc_sglq_active_list);
5446 }
5447 
5456 static void
5457 lpfc_init_sgl_list(struct lpfc_hba *phba)
5458 {
5459  /* Initialize and populate the sglq list per host/VF. */
5460  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5461  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5462 
5463  /* els xri-sgl book keeping */
5464  phba->sli4_hba.els_xri_cnt = 0;
5465 
5466  /* scsi xri-buffer book keeping */
5467  phba->sli4_hba.scsi_xri_cnt = 0;
5468 }
5469 
5484 int
5485 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5486 {
5487  int rc = 0;
5488  struct lpfc_rpi_hdr *rpi_hdr;
5489 
5490  INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5491  if (!phba->sli4_hba.rpi_hdrs_in_use)
5492  return rc;
5493  if (phba->sli4_hba.extents_in_use)
5494  return -EIO;
5495 
5496  rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5497  if (!rpi_hdr) {
5499  "0391 Error during rpi post operation\n");
5500  lpfc_sli4_remove_rpis(phba);
5501  rc = -ENODEV;
5502  }
5503 
5504  return rc;
5505 }
5506 
5520 struct lpfc_rpi_hdr *
5521 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5522 {
5523  uint16_t rpi_limit, curr_rpi_range;
5524  struct lpfc_dmabuf *dmabuf;
5525  struct lpfc_rpi_hdr *rpi_hdr;
5526  uint32_t rpi_count;
5527 
5528  /*
5529  * If the SLI4 port supports extents, posting the rpi header isn't
5530  * required. Set the expected maximum count and let the actual value
5531  * get set when extents are fully allocated.
5532  */
5533  if (!phba->sli4_hba.rpi_hdrs_in_use)
5534  return NULL;
5535  if (phba->sli4_hba.extents_in_use)
5536  return NULL;
5537 
5538  /* The limit on the logical index is just the max_rpi count. */
5539  rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5540  phba->sli4_hba.max_cfg_param.max_rpi - 1;
5541 
5542  spin_lock_irq(&phba->hbalock);
5543  /*
5544  * Establish the starting RPI in this header block. The starting
5545  * rpi is normalized to a zero base because the physical rpi is
5546  * port based.
5547  */
5548  curr_rpi_range = phba->sli4_hba.next_rpi;
5549  spin_unlock_irq(&phba->hbalock);
5550 
5551  /*
5552  * The port has a limited number of rpis. The increment here
5553  * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5554  * and to allow the full max_rpi range per port.
5555  */
5556  if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5557  rpi_count = rpi_limit - curr_rpi_range;
5558  else
5559  rpi_count = LPFC_RPI_HDR_COUNT;
5560 
5561  if (!rpi_count)
5562  return NULL;
5563  /*
5564  * First allocate the protocol header region for the port. The
5565  * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5566  */
5567  dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5568  if (!dmabuf)
5569  return NULL;
5570 
5571  dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5572  LPFC_HDR_TEMPLATE_SIZE,
5573  &dmabuf->phys,
5574  GFP_KERNEL);
5575  if (!dmabuf->virt) {
5576  rpi_hdr = NULL;
5577  goto err_free_dmabuf;
5578  }
5579 
5580  memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5581  if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5582  rpi_hdr = NULL;
5583  goto err_free_coherent;
5584  }
5585 
5586  /* Save the rpi header data for cleanup later. */
5587  rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5588  if (!rpi_hdr)
5589  goto err_free_coherent;
5590 
5591  rpi_hdr->dmabuf = dmabuf;
5592  rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5593  rpi_hdr->page_count = 1;
5594  spin_lock_irq(&phba->hbalock);
5595 
5596  /* The rpi_hdr stores the logical index only. */
5597  rpi_hdr->start_rpi = curr_rpi_range;
5598  list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5599 
5600  /*
5601  * The next_rpi stores the next logical modulo-64 rpi value used
5602  * to post physical rpis in subsequent rpi postings.
5603  */
5604  phba->sli4_hba.next_rpi += rpi_count;
5605  spin_unlock_irq(&phba->hbalock);
5606  return rpi_hdr;
5607 
5608  err_free_coherent:
5609  dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5610  dmabuf->virt, dmabuf->phys);
5611  err_free_dmabuf:
5612  kfree(dmabuf);
5613  return NULL;
5614 }
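 /*
  * [Editorial sketch] A minimal userspace model of the rpi_count clamp
  * in lpfc_sli4_create_rpi_hdr() above; RPI_HDR_COUNT stands in for
  * LPFC_RPI_HDR_COUNT and the numbers are invented:
  */
 #include <stdio.h>

 #define RPI_HDR_COUNT 64 /* stand-in for LPFC_RPI_HDR_COUNT */

 int main(void)
 {
 	unsigned int rpi_limit = 200; /* rpi_base + max_rpi - 1 */
 	unsigned int curr = 192;      /* next unposted logical rpi */
 	unsigned int count;

 	/* Post a full window, or only what remains below the limit. */
 	if (curr + (RPI_HDR_COUNT - 1) > rpi_limit)
 		count = rpi_limit - curr;
 	else
 		count = RPI_HDR_COUNT;
 	printf("posting %u rpis starting at logical index %u\n",
 	       count, curr);
 	return 0;
 }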
5615 
5625 void
5626 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5627 {
5628  struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5629 
5630  if (!phba->sli4_hba.rpi_hdrs_in_use)
5631  goto exit;
5632 
5633  list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5634  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5635  list_del(&rpi_hdr->list);
5636  dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5637  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5638  kfree(rpi_hdr->dmabuf);
5639  kfree(rpi_hdr);
5640  }
5641  exit:
5642  /* There are no rpis available to the port now. */
5643  phba->sli4_hba.next_rpi = 0;
5644 }
5645 
5658 static struct lpfc_hba *
5659 lpfc_hba_alloc(struct pci_dev *pdev)
5660 {
5661  struct lpfc_hba *phba;
5662 
5663  /* Allocate memory for HBA structure */
5664  phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5665  if (!phba) {
5666  dev_err(&pdev->dev, "failed to allocate hba struct\n");
5667  return NULL;
5668  }
5669 
5670  /* Set reference to PCI device in HBA structure */
5671  phba->pcidev = pdev;
5672 
5673  /* Assign an unused board number */
5674  phba->brd_no = lpfc_get_instance();
5675  if (phba->brd_no < 0) {
5676  kfree(phba);
5677  return NULL;
5678  }
5679 
5680  spin_lock_init(&phba->ct_ev_lock);
5681  INIT_LIST_HEAD(&phba->ct_ev_waiters);
5682 
5683  return phba;
5684 }
5685 
5693 static void
5694 lpfc_hba_free(struct lpfc_hba *phba)
5695 {
5696  /* Release the driver assigned board number */
5697  idr_remove(&lpfc_hba_index, phba->brd_no);
5698 
5699  /* Free memory allocated with sli rings */
5700  kfree(phba->sli.ring);
5701  phba->sli.ring = NULL;
5702 
5703  kfree(phba);
5704  return;
5705 }
5706 
5718 static int
5719 lpfc_create_shost(struct lpfc_hba *phba)
5720 {
5721  struct lpfc_vport *vport;
5722  struct Scsi_Host *shost;
5723 
5724  /* Initialize HBA FC structure */
5725  phba->fc_edtov = FF_DEF_EDTOV;
5726  phba->fc_ratov = FF_DEF_RATOV;
5727  phba->fc_altov = FF_DEF_ALTOV;
5728  phba->fc_arbtov = FF_DEF_ARBTOV;
5729 
5730  atomic_set(&phba->sdev_cnt, 0);
5731  vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5732  if (!vport)
5733  return -ENODEV;
5734 
5735  shost = lpfc_shost_from_vport(vport);
5736  phba->pport = vport;
5737  lpfc_debugfs_initialize(vport);
5738  /* Put reference to SCSI host to driver's device private data */
5739  pci_set_drvdata(phba->pcidev, shost);
5740 
5741  return 0;
5742 }
5743 
5751 static void
5752 lpfc_destroy_shost(struct lpfc_hba *phba)
5753 {
5754  struct lpfc_vport *vport = phba->pport;
5755 
5756  /* Destroy the physical port associated with the SCSI host */
5757  destroy_port(vport);
5758 
5759  return;
5760 }
5761 
5770 static void
5771 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5772 {
5773  uint32_t old_mask;
5774  uint32_t old_guard;
5775 
5776  int pagecnt = 10;
5777  if (lpfc_prot_mask && lpfc_prot_guard) {
5778  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5779  "1478 Registering BlockGuard with the "
5780  "SCSI layer\n");
5781 
5782  old_mask = lpfc_prot_mask;
5783  old_guard = lpfc_prot_guard;
5784 
5785  /* Only allow supported values */
5786  lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
5787  SHOST_DIX_TYPE0_PROTECTION |
5788  SHOST_DIX_TYPE1_PROTECTION);
5789  lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
5790 
5791  /* DIF Type 1 protection for profiles AST1/C1 is end to end */
5792  if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
5793  lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
5794 
5795  if (lpfc_prot_mask && lpfc_prot_guard) {
5796  if ((old_mask != lpfc_prot_mask) ||
5797  (old_guard != lpfc_prot_guard))
5799  "1475 Registering BlockGuard with the "
5800  "SCSI layer: mask %d guard %d\n",
5802 
5803  scsi_host_set_prot(shost, lpfc_prot_mask);
5804  scsi_host_set_guard(shost, lpfc_prot_guard);
5805  } else
5807  "1479 Not Registering BlockGuard with the SCSI "
5808  "layer, Bad protection parameters: %d %d\n",
5809  old_mask, old_guard);
5810  }
5811 
5812  if (!_dump_buf_data) {
5813  while (pagecnt) {
5814  spin_lock_init(&_dump_buf_lock);
5815  _dump_buf_data =
5816  (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5817  if (_dump_buf_data) {
5819  "9043 BLKGRD: allocated %d pages for "
5820  "_dump_buf_data at 0x%p\n",
5821  (1 << pagecnt), _dump_buf_data);
5822  _dump_buf_data_order = pagecnt;
5823  memset(_dump_buf_data, 0,
5824  ((1 << PAGE_SHIFT) << pagecnt));
5825  break;
5826  } else
5827  --pagecnt;
5828  }
5829  if (!_dump_buf_data_order)
5831  "9044 BLKGRD: ERROR unable to allocate "
5832  "memory for hexdump\n");
5833  } else
5835  "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5836  "\n", _dump_buf_data);
5837  if (!_dump_buf_dif) {
5838  while (pagecnt) {
5839  _dump_buf_dif =
5840  (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5841  if (_dump_buf_dif) {
5843  "9046 BLKGRD: allocated %d pages for "
5844  "_dump_buf_dif at 0x%p\n",
5845  (1 << pagecnt), _dump_buf_dif);
5846  _dump_buf_dif_order = pagecnt;
5847  memset(_dump_buf_dif, 0,
5848  ((1 << PAGE_SHIFT) << pagecnt));
5849  break;
5850  } else
5851  --pagecnt;
5852  }
5853  if (!_dump_buf_dif_order)
5855  "9047 BLKGRD: ERROR unable to allocate "
5856  "memory for hexdump\n");
5857  } else
5859  "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5860  _dump_buf_dif);
5861 }
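 /*
  * [Editorial sketch] The dump-buffer allocation above retries with a
  * smaller order until __get_free_pages() succeeds. A userspace
  * analogue of that fallback, with malloc standing in for
  * __get_free_pages(GFP_KERNEL, order):
  */
 #include <stdio.h>
 #include <stdlib.h>

 #define PAGE_SHIFT 12 /* 4K pages, as on x86 */

 int main(void)
 {
 	int order = 10;
 	void *buf = NULL;

 	while (order > 0) {
 		buf = malloc((size_t)1 << (PAGE_SHIFT + order));
 		if (buf)
 			break;
 		--order; /* halve the request and retry */
 	}
 	if (buf) {
 		printf("allocated %d pages (order %d)\n", 1 << order, order);
 		free(buf);
 	}
 	return 0;
 }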
5862 
5870 static void
5871 lpfc_post_init_setup(struct lpfc_hba *phba)
5872 {
5873  struct Scsi_Host *shost;
5874  struct lpfc_adapter_event_header adapter_event;
5875 
5876  /* Get the default values for Model Name and Description */
5877  lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5878 
5879  /*
5880  * hba setup may have changed the hba_queue_depth so we need to
5881  * adjust the value of can_queue.
5882  */
5883  shost = pci_get_drvdata(phba->pcidev);
5884  shost->can_queue = phba->cfg_hba_queue_depth - 10;
5885  if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5886  lpfc_setup_bg(phba, shost);
5887 
5888  lpfc_host_attrib_init(shost);
5889 
5890  if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5891  spin_lock_irq(shost->host_lock);
5892  lpfc_poll_start_timer(phba);
5893  spin_unlock_irq(shost->host_lock);
5894  }
5895 
5897  "0428 Perform SCSI scan\n");
5898  /* Send board arrival event to upper layer */
5899  adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5900  adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5901  fc_host_post_vendor_event(shost, fc_get_event_number(),
5902  sizeof(adapter_event),
5903  (char *) &adapter_event,
5904  LPFC_NL_VENDOR_ID);
5905  return;
5906 }
5907 
5919 static int
5920 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5921 {
5922  struct pci_dev *pdev;
5923  unsigned long bar0map_len, bar2map_len;
5924  int i, hbq_count;
5925  void *ptr;
5926  int error = -ENODEV;
5927 
5928  /* Obtain PCI device reference */
5929  if (!phba->pcidev)
5930  return error;
5931  else
5932  pdev = phba->pcidev;
5933 
5934  /* Set the device DMA mask size */
5935  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5936  || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5937  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5938  || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5939  return error;
5940  }
5941  }
5942 
5943  /* Get the bus address of Bar0 and Bar2 and the number of bytes
5944  * required by each mapping.
5945  */
5946  phba->pci_bar0_map = pci_resource_start(pdev, 0);
5947  bar0map_len = pci_resource_len(pdev, 0);
5948 
5949  phba->pci_bar2_map = pci_resource_start(pdev, 2);
5950  bar2map_len = pci_resource_len(pdev, 2);
5951 
5952  /* Map HBA SLIM to a kernel virtual address. */
5953  phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5954  if (!phba->slim_memmap_p) {
5955  dev_printk(KERN_ERR, &pdev->dev,
5956  "ioremap failed for SLIM memory.\n");
5957  goto out;
5958  }
5959 
5960  /* Map HBA Control Registers to a kernel virtual address. */
5961  phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5962  if (!phba->ctrl_regs_memmap_p) {
5963  dev_printk(KERN_ERR, &pdev->dev,
5964  "ioremap failed for HBA control registers.\n");
5965  goto out_iounmap_slim;
5966  }
5967 
5968  /* Allocate memory for SLI-2 structures */
5969  phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5970  SLI2_SLIM_SIZE,
5971  &phba->slim2p.phys,
5972  GFP_KERNEL);
5973  if (!phba->slim2p.virt)
5974  goto out_iounmap;
5975 
5976  memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5977  phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5978  phba->mbox_ext = (phba->slim2p.virt +
5979  offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5980  phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5981  phba->IOCBs = (phba->slim2p.virt +
5982  offsetof(struct lpfc_sli2_slim, IOCBs));
5983 
5984  phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5985  lpfc_sli_hbq_size(),
5986  &phba->hbqslimp.phys,
5987  GFP_KERNEL);
5988  if (!phba->hbqslimp.virt)
5989  goto out_free_slim;
5990 
5991  hbq_count = lpfc_sli_hbq_count();
5992  ptr = phba->hbqslimp.virt;
5993  for (i = 0; i < hbq_count; ++i) {
5994  phba->hbqs[i].hbq_virt = ptr;
5995  INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5996  ptr += (lpfc_hbq_defs[i]->entry_count *
5997  sizeof(struct lpfc_hbq_entry));
5998  }
5999  phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6000  phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6001 
6002  memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6003 
6004  INIT_LIST_HEAD(&phba->rb_pend_list);
6005 
6006  phba->MBslimaddr = phba->slim_memmap_p;
6007  phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6008  phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6009  phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6010  phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6011 
6012  return 0;
6013 
6014 out_free_slim:
6015  dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6016  phba->slim2p.virt, phba->slim2p.phys);
6017 out_iounmap:
6018  iounmap(phba->ctrl_regs_memmap_p);
6019 out_iounmap_slim:
6020  iounmap(phba->slim_memmap_p);
6021 out:
6022  return error;
6023 }
6024 
6032 static void
6033 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6034 {
6035  struct pci_dev *pdev;
6036 
6037  /* Obtain PCI device reference */
6038  if (!phba->pcidev)
6039  return;
6040  else
6041  pdev = phba->pcidev;
6042 
6043  /* Free coherent DMA memory allocated */
6044  dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6045  phba->hbqslimp.virt, phba->hbqslimp.phys);
6046  dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6047  phba->slim2p.virt, phba->slim2p.phys);
6048 
6049  /* I/O memory unmap */
6050  iounmap(phba->ctrl_regs_memmap_p);
6051  iounmap(phba->slim_memmap_p);
6052 
6053  return;
6054 }
6055 
6065 int
6067 {
6068  struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6069  struct lpfc_register reg_data;
6070  int i, port_error = 0;
6071  uint32_t if_type;
6072 
6073  memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6074  memset(&reg_data, 0, sizeof(reg_data));
6075  if (!phba->sli4_hba.PSMPHRregaddr)
6076  return -ENODEV;
6077 
6078  /* Wait up to 30 seconds for the SLI port POST to complete and the port to be ready */
6079  for (i = 0; i < 3000; i++) {
6080  if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6081  &portsmphr_reg.word0) ||
6082  (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6083  /* Port has a fatal POST error, break out */
6084  port_error = -ENODEV;
6085  break;
6086  }
6087  if (LPFC_POST_STAGE_PORT_READY ==
6088  bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6089  break;
6090  msleep(10);
6091  }
6092 
6093  /*
6094  * If there was a port error during POST, then don't proceed with
6095  * other register reads as the data may not be valid. Just exit.
6096  */
6097  if (port_error) {
6099  "1408 Port Failed POST - portsmphr=0x%x, "
6100  "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6101  "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6102  portsmphr_reg.word0,
6103  bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6104  bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6105  bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6106  bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6107  bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6108  bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6109  bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6110  bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6111  } else {
6113  "2534 Device Info: SLIFamily=0x%x, "
6114  "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6115  "SLIHint_2=0x%x, FT=0x%x\n",
6116  bf_get(lpfc_sli_intf_sli_family,
6117  &phba->sli4_hba.sli_intf),
6118  bf_get(lpfc_sli_intf_slirev,
6119  &phba->sli4_hba.sli_intf),
6120  bf_get(lpfc_sli_intf_if_type,
6121  &phba->sli4_hba.sli_intf),
6122  bf_get(lpfc_sli_intf_sli_hint1,
6123  &phba->sli4_hba.sli_intf),
6124  bf_get(lpfc_sli_intf_sli_hint2,
6125  &phba->sli4_hba.sli_intf),
6126  bf_get(lpfc_sli_intf_func_type,
6127  &phba->sli4_hba.sli_intf));
6128  /*
6129  * Check for other Port errors during the initialization
6130  * process. Fail the load if the port did not come up
6131  * correctly.
6132  */
6133  if_type = bf_get(lpfc_sli_intf_if_type,
6134  &phba->sli4_hba.sli_intf);
6135  switch (if_type) {
6136  case LPFC_SLI_INTF_IF_TYPE_0:
6137  phba->sli4_hba.ue_mask_lo =
6138  readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6139  phba->sli4_hba.ue_mask_hi =
6140  readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6141  uerrlo_reg.word0 =
6142  readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6143  uerrhi_reg.word0 =
6144  readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6145  if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6146  (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6148  "1422 Unrecoverable Error "
6149  "Detected during POST "
6150  "uerr_lo_reg=0x%x, "
6151  "uerr_hi_reg=0x%x, "
6152  "ue_mask_lo_reg=0x%x, "
6153  "ue_mask_hi_reg=0x%x\n",
6154  uerrlo_reg.word0,
6155  uerrhi_reg.word0,
6156  phba->sli4_hba.ue_mask_lo,
6157  phba->sli4_hba.ue_mask_hi);
6158  port_error = -ENODEV;
6159  }
6160  break;
6161  case LPFC_SLI_INTF_IF_TYPE_2:
6162  /* Final checks. The port status should be clean. */
6163  if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6164  &reg_data.word0) ||
6165  (bf_get(lpfc_sliport_status_err, &reg_data) &&
6166  !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6167  phba->work_status[0] =
6168  readl(phba->sli4_hba.u.if_type2.
6169  ERR1regaddr);
6170  phba->work_status[1] =
6171  readl(phba->sli4_hba.u.if_type2.
6172  ERR2regaddr);
6174  "2888 Unrecoverable port error "
6175  "following POST: port status reg "
6176  "0x%x, port_smphr reg 0x%x, "
6177  "error 1=0x%x, error 2=0x%x\n",
6178  reg_data.word0,
6179  portsmphr_reg.word0,
6180  phba->work_status[0],
6181  phba->work_status[1]);
6182  port_error = -ENODEV;
6183  }
6184  break;
6185  case LPFC_SLI_INTF_IF_TYPE_1:
6186  default:
6187  break;
6188  }
6189  }
6190  return port_error;
6191 }
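 /*
  * [Editorial sketch] The POST wait above is a bounded poll: 3000
  * reads spaced 10 ms apart gives the 30 second budget. The same
  * shape in plain C, with a dummy ready check standing in for the
  * SEMAPHORE register read:
  */
 #include <stdio.h>
 #include <unistd.h>

 static int port_ready(void)
 {
 	static int polls;
 	return ++polls > 5; /* pretend POST finishes on the 6th read */
 }

 int main(void)
 {
 	int i, ready = 0;

 	for (i = 0; i < 3000; i++) {
 		if (port_ready()) {
 			ready = 1;
 			break;
 		}
 		usleep(10 * 1000); /* 10 ms between polls */
 	}
 	printf("port %s after %d polls\n",
 	       ready ? "ready" : "timed out", i);
 	return 0;
 }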
6192 
6201 static void
6202 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6203 {
6204  switch (if_type) {
6205  case LPFC_SLI_INTF_IF_TYPE_0:
6206  phba->sli4_hba.u.if_type0.UERRLOregaddr =
6207  phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6208  phba->sli4_hba.u.if_type0.UERRHIregaddr =
6209  phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6210  phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6211  phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6212  phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6213  phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6214  phba->sli4_hba.SLIINTFregaddr =
6215  phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6216  break;
6217  case LPFC_SLI_INTF_IF_TYPE_2:
6218  phba->sli4_hba.u.if_type2.ERR1regaddr =
6219  phba->sli4_hba.conf_regs_memmap_p +
6220  LPFC_CTL_PORT_ER1_OFFSET;
6221  phba->sli4_hba.u.if_type2.ERR2regaddr =
6222  phba->sli4_hba.conf_regs_memmap_p +
6223  LPFC_CTL_PORT_ER2_OFFSET;
6224  phba->sli4_hba.u.if_type2.CTRLregaddr =
6225  phba->sli4_hba.conf_regs_memmap_p +
6226  LPFC_CTL_PORT_CTL_OFFSET;
6227  phba->sli4_hba.u.if_type2.STATUSregaddr =
6228  phba->sli4_hba.conf_regs_memmap_p +
6229  LPFC_CTL_PORT_STA_OFFSET;
6230  phba->sli4_hba.SLIINTFregaddr =
6231  phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6232  phba->sli4_hba.PSMPHRregaddr =
6233  phba->sli4_hba.conf_regs_memmap_p +
6234  LPFC_CTL_PORT_SEM_OFFSET;
6235  phba->sli4_hba.RQDBregaddr =
6236  phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
6237  phba->sli4_hba.WQDBregaddr =
6238  phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
6239  phba->sli4_hba.EQCQDBregaddr =
6240  phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6241  phba->sli4_hba.MQDBregaddr =
6242  phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6243  phba->sli4_hba.BMBXregaddr =
6244  phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6245  break;
6246  case LPFC_SLI_INTF_IF_TYPE_1:
6247  default:
6248  dev_printk(KERN_ERR, &phba->pcidev->dev,
6249  "FATAL - unsupported SLI4 interface type - %d\n",
6250  if_type);
6251  break;
6252  }
6253 }
6254 
6262 static void
6263 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6264 {
6265  phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6266  LPFC_SLIPORT_IF0_SMPHR;
6267  phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6268  LPFC_HST_ISR0;
6269  phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6270  LPFC_HST_IMR0;
6271  phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6272  LPFC_HST_ISCR0;
6273 }
6274 
6285 static int
6286 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6287 {
6288  if (vf > LPFC_VIR_FUNC_MAX)
6289  return -ENODEV;
6290 
6291  phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6292  vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
6293  phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6294  vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
6295  phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6296  vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6297  phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6298  vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6299  phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6300  vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6301  return 0;
6302 }
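 /*
  * [Editorial sketch] Each virtual function owns one doorbell page in
  * BAR2, so a register address is base + vf * page_size + offset. The
  * page size and doorbell offset below are made-up stand-ins for
  * LPFC_VFR_PAGE_SIZE and LPFC_RQ_DOORBELL:
  */
 #include <stdio.h>

 #define VFR_PAGE_SIZE 0x1000u /* stand-in for LPFC_VFR_PAGE_SIZE */
 #define RQ_DOORBELL   0x00a0u /* stand-in for LPFC_RQ_DOORBELL   */

 int main(void)
 {
 	unsigned int vf;

 	for (vf = 0; vf < 4; vf++)
 		printf("VF %u RQ doorbell at BAR2 + 0x%x\n",
 		       vf, vf * VFR_PAGE_SIZE + RQ_DOORBELL);
 	return 0;
 }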
6303 
6319 static int
6320 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6321 {
6322  uint32_t bmbx_size;
6323  struct lpfc_dmabuf *dmabuf;
6324  struct dma_address *dma_address;
6325  uint32_t pa_addr;
6326  uint64_t phys_addr;
6327 
6328  dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6329  if (!dmabuf)
6330  return -ENOMEM;
6331 
6332  /*
6333  * The bootstrap mailbox region is comprised of 2 parts
6334  * plus an alignment restriction of 16 bytes.
6335  */
6336  bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6337  dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6338  bmbx_size,
6339  &dmabuf->phys,
6340  GFP_KERNEL);
6341  if (!dmabuf->virt) {
6342  kfree(dmabuf);
6343  return -ENOMEM;
6344  }
6345  memset(dmabuf->virt, 0, bmbx_size);
6346 
6347  /*
6348  * Initialize the bootstrap mailbox pointers now so that the register
6349  * operations are simple later. The mailbox dma address is required
6350  * to be 16-byte aligned. Also align the virtual memory as each
6351  * mailbox is copied into the bmbx mailbox region before issuing the
6352  * command to the port.
6353  */
6354  phba->sli4_hba.bmbx.dmabuf = dmabuf;
6355  phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6356 
6357  phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6359  phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6361 
6362  /*
6363  * Set the high and low physical addresses now. The SLI4 alignment
6364  * requirement is 16 bytes and the mailbox is posted to the port
6365  * as two 30-bit addresses. The other data is a bit marking whether
6366  * the 30-bit address is the high or low address.
6367  * Upcast bmbx aphys to 64 bits so the shift instruction compiles
6368  * cleanly on 32-bit machines.
6369  */
6370  dma_address = &phba->sli4_hba.bmbx.dma_address;
6371  phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6372  pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6373  dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6374  LPFC_BMBX_BIT1_ADDR_HI);
6375 
6376  pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6377  dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6378  LPFC_BMBX_BIT1_ADDR_LO);
6379  return 0;
6380 }
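 /*
  * [Editorial sketch] The address split above in standalone form: a
  * 16-byte aligned physical address is carved into two 30-bit words,
  * each shifted up two bits to leave room for the high/low flag
  * (LPFC_BMBX_BIT1_ADDR_HI/LO in the driver; omitted here):
  */
 #include <stdio.h>
 #include <stdint.h>

 int main(void)
 {
 	uint64_t aphys = 0x1234567890ULL; /* example, low 4 bits zero */
 	uint32_t hi = (uint32_t)((aphys >> 34) & 0x3fffffff);
 	uint32_t lo = (uint32_t)((aphys >> 4) & 0x3fffffff);

 	printf("addr_hi word = 0x%08x, addr_lo word = 0x%08x\n",
 	       hi << 2, lo << 2);
 	return 0;
 }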
6381 
6393 static void
6394 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6395 {
6396  dma_free_coherent(&phba->pcidev->dev,
6397  phba->sli4_hba.bmbx.bmbx_size,
6398  phba->sli4_hba.bmbx.dmabuf->virt,
6399  phba->sli4_hba.bmbx.dmabuf->phys);
6400 
6401  kfree(phba->sli4_hba.bmbx.dmabuf);
6402  memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6403 }
6404 
6419 int
6420 lpfc_sli4_read_config(struct lpfc_hba *phba)
6421 {
6422  LPFC_MBOXQ_t *pmb;
6423  struct lpfc_mbx_read_config *rd_config;
6424  union lpfc_sli4_cfg_shdr *shdr;
6425  uint32_t shdr_status, shdr_add_status;
6426  struct lpfc_mbx_get_func_cfg *get_func_cfg;
6427  struct lpfc_rsrc_desc_fcfcoe *desc;
6428  char *pdesc_0;
6429  uint32_t desc_count;
6430  int length, i, rc = 0, rc2;
6431 
6432  pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6433  if (!pmb) {
6435  "2011 Unable to allocate memory for issuing "
6436  "SLI_CONFIG_SPECIAL mailbox command\n");
6437  return -ENOMEM;
6438  }
6439 
6440  lpfc_read_config(phba, pmb);
6441 
6442  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6443  if (rc != MBX_SUCCESS) {
6445  "2012 Mailbox failed, mbxCmd x%x "
6446  "READ_CONFIG, mbxStatus x%x\n",
6447  bf_get(lpfc_mqe_command, &pmb->u.mqe),
6448  bf_get(lpfc_mqe_status, &pmb->u.mqe));
6449  rc = -EIO;
6450  } else {
6451  rd_config = &pmb->u.mqe.un.rd_config;
6452  if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6453  phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6454  phba->sli4_hba.lnk_info.lnk_tp =
6455  bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6456  phba->sli4_hba.lnk_info.lnk_no =
6457  bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6459  "3081 lnk_type:%d, lnk_numb:%d\n",
6460  phba->sli4_hba.lnk_info.lnk_tp,
6461  phba->sli4_hba.lnk_info.lnk_no);
6462  } else
6464  "3082 Mailbox (x%x) returned ldv:x0\n",
6465  bf_get(lpfc_mqe_command, &pmb->u.mqe));
6466  phba->sli4_hba.extents_in_use =
6467  bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6468  phba->sli4_hba.max_cfg_param.max_xri =
6469  bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6470  phba->sli4_hba.max_cfg_param.xri_base =
6471  bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6472  phba->sli4_hba.max_cfg_param.max_vpi =
6473  bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6474  phba->sli4_hba.max_cfg_param.vpi_base =
6475  bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6476  phba->sli4_hba.max_cfg_param.max_rpi =
6477  bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6478  phba->sli4_hba.max_cfg_param.rpi_base =
6479  bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6480  phba->sli4_hba.max_cfg_param.max_vfi =
6481  bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6482  phba->sli4_hba.max_cfg_param.vfi_base =
6483  bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6484  phba->sli4_hba.max_cfg_param.max_fcfi =
6485  bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6486  phba->sli4_hba.max_cfg_param.max_eq =
6487  bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6488  phba->sli4_hba.max_cfg_param.max_rq =
6489  bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6490  phba->sli4_hba.max_cfg_param.max_wq =
6491  bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6492  phba->sli4_hba.max_cfg_param.max_cq =
6493  bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6494  phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6495  phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6496  phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6497  phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6498  phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6499  (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6500  phba->max_vports = phba->max_vpi;
6502  "2003 cfg params Extents? %d "
6503  "XRI(B:%d M:%d), "
6504  "VPI(B:%d M:%d) "
6505  "VFI(B:%d M:%d) "
6506  "RPI(B:%d M:%d) "
6507  "FCFI(Count:%d)\n",
6508  phba->sli4_hba.extents_in_use,
6509  phba->sli4_hba.max_cfg_param.xri_base,
6510  phba->sli4_hba.max_cfg_param.max_xri,
6511  phba->sli4_hba.max_cfg_param.vpi_base,
6512  phba->sli4_hba.max_cfg_param.max_vpi,
6513  phba->sli4_hba.max_cfg_param.vfi_base,
6514  phba->sli4_hba.max_cfg_param.max_vfi,
6515  phba->sli4_hba.max_cfg_param.rpi_base,
6516  phba->sli4_hba.max_cfg_param.max_rpi,
6517  phba->sli4_hba.max_cfg_param.max_fcfi);
6518  }
6519 
6520  if (rc)
6521  goto read_cfg_out;
6522 
6523  /* Reset the DFT_HBA_Q_DEPTH to the max xri */
6524  if (phba->cfg_hba_queue_depth >
6525  (phba->sli4_hba.max_cfg_param.max_xri -
6526  lpfc_sli4_get_els_iocb_cnt(phba)))
6527  phba->cfg_hba_queue_depth =
6528  phba->sli4_hba.max_cfg_param.max_xri -
6529  lpfc_sli4_get_els_iocb_cnt(phba);
6530 
6531  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6532  LPFC_SLI_INTF_IF_TYPE_2)
6533  goto read_cfg_out;
6534 
6535  /* get the pf# and vf# for SLI4 if_type 2 port */
6536  length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6537  sizeof(struct lpfc_sli4_cfg_mhdr));
6538  lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6539  LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6540  length, LPFC_SLI4_MBX_EMBED);
6541 
6542  rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6543  shdr = (union lpfc_sli4_cfg_shdr *)
6544  &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6545  shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6546  shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6547  if (rc2 || shdr_status || shdr_add_status) {
6549  "3026 Mailbox failed, mbxCmd x%x "
6550  "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6551  bf_get(lpfc_mqe_command, &pmb->u.mqe),
6552  bf_get(lpfc_mqe_status, &pmb->u.mqe));
6553  goto read_cfg_out;
6554  }
6555 
6556  /* search for the fc_fcoe resource descriptor */
6557  get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6558  desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6559 
6560  pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6561  desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6562  length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6563  if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6564  length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6565  else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6566  goto read_cfg_out;
6567 
6568  for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6569  desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6570  if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6571  bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6572  phba->sli4_hba.iov.pf_number =
6573  bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6574  phba->sli4_hba.iov.vf_number =
6575  bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6576  break;
6577  }
6578  }
6579 
6580  if (i < LPFC_RSRC_DESC_MAX_NUM)
6582  "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6583  "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6584  phba->sli4_hba.iov.vf_number);
6585  else
6587  "3028 GET_FUNCTION_CONFIG: failed to find "
6588  "Resource Descriptor:x%x\n",
6589  LPFC_RSRC_DESC_TYPE_FCFCOE);
6590 
6591 read_cfg_out:
6592  mempool_free(pmb, phba->mbox_mem_pool);
6593  return rc;
6594 }
6595 
6609 static int
6610 lpfc_setup_endian_order(struct lpfc_hba *phba)
6611 {
6612  LPFC_MBOXQ_t *mboxq;
6613  uint32_t if_type, rc = 0;
6614  uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6615  HOST_ENDIAN_HIGH_WORD1};
6616 
6617  if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6618  switch (if_type) {
6619  case LPFC_SLI_INTF_IF_TYPE_0:
6620  mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6621  GFP_KERNEL);
6622  if (!mboxq) {
6624  "0492 Unable to allocate memory for "
6625  "issuing SLI_CONFIG_SPECIAL mailbox "
6626  "command\n");
6627  return -ENOMEM;
6628  }
6629 
6630  /*
6631  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6632  * two words to contain special data values and no other data.
6633  */
6634  memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6635  memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6636  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6637  if (rc != MBX_SUCCESS) {
6639  "0493 SLI_CONFIG_SPECIAL mailbox "
6640  "failed with status x%x\n",
6641  rc);
6642  rc = -EIO;
6643  }
6644  mempool_free(mboxq, phba->mbox_mem_pool);
6645  break;
6646  case LPFC_SLI_INTF_IF_TYPE_2:
6647  case LPFC_SLI_INTF_IF_TYPE_1:
6648  default:
6649  break;
6650  }
6651  return rc;
6652 }
6653 
6667 static int
6668 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6669 {
6670  int cfg_fcp_io_channel;
6671  uint32_t cpu;
6672  uint32_t i = 0;
6673 
6674 
6675  /*
6676  * Sanity check for configured queue parameters against the run-time
6677  * device parameters
6678  */
6679 
6680  /* Sanity check on HBA EQ parameters */
6681  cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6682 
6683  /* It doesn't make sense to have more IO channels than CPUs */
6684  for_each_online_cpu(cpu) {
6685  i++;
6686  }
6687  if (i < cfg_fcp_io_channel) {
6688  lpfc_printf_log(phba,
6689  KERN_ERR, LOG_INIT,
6690  "3188 Reducing IO channels to match number of "
6691  "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
6692  cfg_fcp_io_channel = i;
6693  }
6694 
6695  if (cfg_fcp_io_channel >
6696  phba->sli4_hba.max_cfg_param.max_eq) {
6697  if (phba->sli4_hba.max_cfg_param.max_eq <
6698  LPFC_FCP_IO_CHAN_MIN) {
6700  "2574 Not enough EQs (%d) from the "
6701  "pci function for supporting FCP "
6702  "EQs (%d)\n",
6703  phba->sli4_hba.max_cfg_param.max_eq,
6704  phba->cfg_fcp_io_channel);
6705  goto out_error;
6706  }
6708  "2575 Reducing IO channels to match number of "
6709  "available EQs: from %d to %d\n",
6710  cfg_fcp_io_channel,
6711  phba->sli4_hba.max_cfg_param.max_eq);
6712  cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6713  }
6714 
6715  /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
6716 
6717  /* The actual number of FCP event queues adopted */
6718  phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6719  phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6720  phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6721 
6722  /* Get EQ depth from module parameter, fake the default for now */
6723  phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6724  phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6725 
6726  /* Get CQ depth from module parameter, fake the default for now */
6727  phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6728  phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6729 
6730  return 0;
6731 out_error:
6732  return -ENOMEM;
6733 }
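 /*
  * [Editorial sketch] A userspace analogue of the CPU-count clamp in
  * lpfc_sli4_queue_verify() above; the kernel walks
  * for_each_online_cpu() where this uses sysconf():
  */
 #include <stdio.h>
 #include <unistd.h>

 int main(void)
 {
 	long cpus = sysconf(_SC_NPROCESSORS_ONLN);
 	int io_channels = 8; /* stand-in for cfg_fcp_io_channel */

 	if (cpus > 0 && io_channels > cpus) {
 		printf("reducing IO channels from %d to %ld\n",
 		       io_channels, cpus);
 		io_channels = (int)cpus;
 	}
 	printf("using %d IO channels\n", io_channels);
 	return 0;
 }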
6734 
6749 int
6750 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6751 {
6752  struct lpfc_queue *qdesc;
6753  int idx;
6754 
6755  /*
6756  * Create HBA Record arrays.
6757  */
6758  if (!phba->cfg_fcp_io_channel)
6759  return -ERANGE;
6760 
6761  phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6762  phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6763  phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6764  phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6765  phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6766  phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6767 
6768  phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
6769  phba->cfg_fcp_io_channel), GFP_KERNEL);
6770  if (!phba->sli4_hba.hba_eq) {
6772  "2576 Failed allocate memory for "
6773  "fast-path EQ record array\n");
6774  goto out_error;
6775  }
6776 
6777  phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6778  phba->cfg_fcp_io_channel), GFP_KERNEL);
6779  if (!phba->sli4_hba.fcp_cq) {
6781  "2577 Failed allocate memory for fast-path "
6782  "CQ record array\n");
6783  goto out_error;
6784  }
6785 
6786  phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6787  phba->cfg_fcp_io_channel), GFP_KERNEL);
6788  if (!phba->sli4_hba.fcp_wq) {
6790  "2578 Failed allocate memory for fast-path "
6791  "WQ record array\n");
6792  goto out_error;
6793  }
6794 
6795  /*
6796  * Since the first EQ can have multiple CQs associated with it,
6797  * this array is used to quickly see if we have a FCP fast-path
6798  * CQ match.
6799  */
6800  phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
6801  phba->cfg_fcp_io_channel), GFP_KERNEL);
6802  if (!phba->sli4_hba.fcp_cq_map) {
6804  "2545 Failed allocate memory for fast-path "
6805  "CQ map\n");
6806  goto out_error;
6807  }
6808 
6809  /*
6810  * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
6811  * how many EQs to create.
6812  */
6813  for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6814 
6815  /* Create EQs */
6816  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6817  phba->sli4_hba.eq_ecount);
6818  if (!qdesc) {
6820  "0497 Failed allocate EQ (%d)\n", idx);
6821  goto out_error;
6822  }
6823  phba->sli4_hba.hba_eq[idx] = qdesc;
6824 
6825  /* Create Fast Path FCP CQs */
6826  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6827  phba->sli4_hba.cq_ecount);
6828  if (!qdesc) {
6830  "0499 Failed allocate fast-path FCP "
6831  "CQ (%d)\n", idx);
6832  goto out_error;
6833  }
6834  phba->sli4_hba.fcp_cq[idx] = qdesc;
6835 
6836  /* Create Fast Path FCP WQs */
6837  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6838  phba->sli4_hba.wq_ecount);
6839  if (!qdesc) {
6841  "0503 Failed allocate fast-path FCP "
6842  "WQ (%d)\n", idx);
6843  goto out_error;
6844  }
6845  phba->sli4_hba.fcp_wq[idx] = qdesc;
6846  }
6847 
6848 
6849  /*
6850  * Create Slow Path Completion Queues (CQs)
6851  */
6852 
6853  /* Create slow-path Mailbox Command Complete Queue */
6854  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6855  phba->sli4_hba.cq_ecount);
6856  if (!qdesc) {
6858  "0500 Failed allocate slow-path mailbox CQ\n");
6859  goto out_error;
6860  }
6861  phba->sli4_hba.mbx_cq = qdesc;
6862 
6863  /* Create slow-path ELS Complete Queue */
6864  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6865  phba->sli4_hba.cq_ecount);
6866  if (!qdesc) {
6868  "0501 Failed allocate slow-path ELS CQ\n");
6869  goto out_error;
6870  }
6871  phba->sli4_hba.els_cq = qdesc;
6872 
6873 
6874  /*
6875  * Create Slow Path Work Queues (WQs)
6876  */
6877 
6878  /* Create Mailbox Command Queue */
6879 
6880  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6881  phba->sli4_hba.mq_ecount);
6882  if (!qdesc) {
6884  "0505 Failed allocate slow-path MQ\n");
6885  goto out_error;
6886  }
6887  phba->sli4_hba.mbx_wq = qdesc;
6888 
6889  /*
6890  * Create ELS Work Queues
6891  */
6892 
6893  /* Create slow-path ELS Work Queue */
6894  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6895  phba->sli4_hba.wq_ecount);
6896  if (!qdesc) {
6898  "0504 Failed allocate slow-path ELS WQ\n");
6899  goto out_error;
6900  }
6901  phba->sli4_hba.els_wq = qdesc;
6902 
6903  /*
6904  * Create Receive Queue (RQ)
6905  */
6906 
6907  /* Create Receive Queue for header */
6908  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6909  phba->sli4_hba.rq_ecount);
6910  if (!qdesc) {
6912  "0506 Failed allocate receive HRQ\n");
6913  goto out_error;
6914  }
6915  phba->sli4_hba.hdr_rq = qdesc;
6916 
6917  /* Create Receive Queue for data */
6918  qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6919  phba->sli4_hba.rq_ecount);
6920  if (!qdesc) {
6922  "0507 Failed allocate receive DRQ\n");
6923  goto out_error;
6924  }
6925  phba->sli4_hba.dat_rq = qdesc;
6926 
6927  return 0;
6928 
6929 out_error:
6930  lpfc_sli4_queue_destroy(phba);
6931  return -ENOMEM;
6932 }
6933 
6946 void
6947 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6948 {
6949  int idx;
6950 
6951  if (phba->sli4_hba.hba_eq != NULL) {
6952  /* Release HBA event queue */
6953  for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6954  if (phba->sli4_hba.hba_eq[idx] != NULL) {
6955  lpfc_sli4_queue_free(
6956  phba->sli4_hba.hba_eq[idx]);
6957  phba->sli4_hba.hba_eq[idx] = NULL;
6958  }
6959  }
6960  kfree(phba->sli4_hba.hba_eq);
6961  phba->sli4_hba.hba_eq = NULL;
6962  }
6963 
6964  if (phba->sli4_hba.fcp_cq != NULL) {
6965  /* Release FCP completion queue */
6966  for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6967  if (phba->sli4_hba.fcp_cq[idx] != NULL) {
6968  lpfc_sli4_queue_free(
6969  phba->sli4_hba.fcp_cq[idx]);
6970  phba->sli4_hba.fcp_cq[idx] = NULL;
6971  }
6972  }
6973  kfree(phba->sli4_hba.fcp_cq);
6974  phba->sli4_hba.fcp_cq = NULL;
6975  }
6976 
6977  if (phba->sli4_hba.fcp_wq != NULL) {
6978  /* Release FCP work queue */
6979  for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6980  if (phba->sli4_hba.fcp_wq[idx] != NULL) {
6981  lpfc_sli4_queue_free(
6982  phba->sli4_hba.fcp_wq[idx]);
6983  phba->sli4_hba.fcp_wq[idx] = NULL;
6984  }
6985  }
6986  kfree(phba->sli4_hba.fcp_wq);
6987  phba->sli4_hba.fcp_wq = NULL;
6988  }
6989 
6990  /* Release FCP CQ mapping array */
6991  if (phba->sli4_hba.fcp_cq_map != NULL) {
6992  kfree(phba->sli4_hba.fcp_cq_map);
6993  phba->sli4_hba.fcp_cq_map = NULL;
6994  }
6995 
6996  /* Release mailbox command work queue */
6997  if (phba->sli4_hba.mbx_wq != NULL) {
6998  lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6999  phba->sli4_hba.mbx_wq = NULL;
7000  }
7001 
7002  /* Release ELS work queue */
7003  if (phba->sli4_hba.els_wq != NULL) {
7004  lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7005  phba->sli4_hba.els_wq = NULL;
7006  }
7007 
7008  /* Release unsolicited receive queue */
7009  if (phba->sli4_hba.hdr_rq != NULL) {
7010  lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7011  phba->sli4_hba.hdr_rq = NULL;
7012  }
7013  if (phba->sli4_hba.dat_rq != NULL) {
7014  lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7015  phba->sli4_hba.dat_rq = NULL;
7016  }
7017 
7018  /* Release ELS complete queue */
7019  if (phba->sli4_hba.els_cq != NULL) {
7020  lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7021  phba->sli4_hba.els_cq = NULL;
7022  }
7023 
7024  /* Release mailbox command complete queue */
7025  if (phba->sli4_hba.mbx_cq != NULL) {
7026  lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7027  phba->sli4_hba.mbx_cq = NULL;
7028  }
7029 
7030  return;
7031 }
7032 
7045 int
7046 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7047 {
7048  struct lpfc_sli *psli = &phba->sli;
7049  struct lpfc_sli_ring *pring;
7050  int rc = -ENOMEM;
7051  int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7052  int fcp_cq_index = 0;
7053 
7054  /*
7055  * Set up HBA Event Queues (EQs)
7056  */
7057 
7058  /* Set up HBA event queue */
7059  if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7060  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7061  "3147 Fast-path EQs not allocated\n");
7062  rc = -ENOMEM;
7063  goto out_error;
7064  }
7065  for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7066  if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7067  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7068  "0522 Fast-path EQ (%d) not "
7069  "allocated\n", fcp_eqidx);
7070  rc = -ENOMEM;
7071  goto out_destroy_hba_eq;
7072  }
7073  rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7074  (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7075  if (rc) {
7076  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7077  "0523 Failed setup of fast-path EQ "
7078  "(%d), rc = 0x%x\n", fcp_eqidx, rc);
7079  goto out_destroy_hba_eq;
7080  }
7081  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7082  "2584 HBA EQ setup: "
7083  "queue[%d]-id=%d\n", fcp_eqidx,
7084  phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7085  }
7086 
7087  /* Set up fast-path FCP Response Complete Queue */
7088  if (!phba->sli4_hba.fcp_cq) {
7089  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7090  "3148 Fast-path FCP CQ array not "
7091  "allocated\n");
7092  rc = -ENOMEM;
7093  goto out_destroy_hba_eq;
7094  }
7095 
7096  for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7097  if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7098  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7099  "0526 Fast-path FCP CQ (%d) not "
7100  "allocated\n", fcp_cqidx);
7101  rc = -ENOMEM;
7102  goto out_destroy_fcp_cq;
7103  }
7104  rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7105  phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7106  if (rc) {
7107  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7108  "0527 Failed setup of fast-path FCP "
7109  "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7110  goto out_destroy_fcp_cq;
7111  }
7112 
7113  /* Setup fcp_cq_map for fast lookup */
7114  phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7115  phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7116 
7117  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7118  "2588 FCP CQ setup: cq[%d]-id=%d, "
7119  "parent seq[%d]-id=%d\n",
7120  fcp_cqidx,
7121  phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7122  fcp_cqidx,
7123  phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7124  }
7125 
7126  /* Set up fast-path FCP Work Queue */
7127  if (!phba->sli4_hba.fcp_wq) {
7128  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7129  "3149 Fast-path FCP WQ array not "
7130  "allocated\n");
7131  rc = -ENOMEM;
7132  goto out_destroy_fcp_cq;
7133  }
7134 
7135  for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7136  if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7137  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7138  "0534 Fast-path FCP WQ (%d) not "
7139  "allocated\n", fcp_wqidx);
7140  rc = -ENOMEM;
7141  goto out_destroy_fcp_wq;
7142  }
7143  rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7144  phba->sli4_hba.fcp_cq[fcp_wqidx],
7145  LPFC_FCP);
7146  if (rc) {
7147  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7148  "0535 Failed setup of fast-path FCP "
7149  "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7150  goto out_destroy_fcp_wq;
7151  }
7152 
7153  /* Bind this WQ to the next FCP ring */
7154  pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7155  pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7156  phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7157 
7158  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7159  "2591 FCP WQ setup: wq[%d]-id=%d, "
7160  "parent cq[%d]-id=%d\n",
7161  fcp_wqidx,
7162  phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7163  fcp_cq_index,
7164  phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7165  }
7166  /*
7167  * Set up Complete Queues (CQs)
7168  */
7169 
7170  /* Set up slow-path MBOX Complete Queue as the first CQ */
7171  if (!phba->sli4_hba.mbx_cq) {
7172  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7173  "0528 Mailbox CQ not allocated\n");
7174  rc = -ENOMEM;
7175  goto out_destroy_fcp_wq;
7176  }
7177  rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7178  phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7179  if (rc) {
7180  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7181  "0529 Failed setup of slow-path mailbox CQ: "
7182  "rc = 0x%x\n", rc);
7183  goto out_destroy_fcp_wq;
7184  }
7185  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7186  "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7187  phba->sli4_hba.mbx_cq->queue_id,
7188  phba->sli4_hba.hba_eq[0]->queue_id);
7189 
7190  /* Set up slow-path ELS Complete Queue */
7191  if (!phba->sli4_hba.els_cq) {
7192  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7193  "0530 ELS CQ not allocated\n");
7194  rc = -ENOMEM;
7195  goto out_destroy_mbx_cq;
7196  }
7197  rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7198  phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7199  if (rc) {
7200  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7201  "0531 Failed setup of slow-path ELS CQ: "
7202  "rc = 0x%x\n", rc);
7203  goto out_destroy_mbx_cq;
7204  }
7205  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7206  "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7207  phba->sli4_hba.els_cq->queue_id,
7208  phba->sli4_hba.hba_eq[0]->queue_id);
7209 
7210  /*
7211  * Set up all the Work Queues (WQs)
7212  */
7213 
7214  /* Set up Mailbox Command Queue */
7215  if (!phba->sli4_hba.mbx_wq) {
7216  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7217  "0538 Slow-path MQ not allocated\n");
7218  rc = -ENOMEM;
7219  goto out_destroy_els_cq;
7220  }
7221  rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7222  phba->sli4_hba.mbx_cq, LPFC_MBOX);
7223  if (rc) {
7224  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7225  "0539 Failed setup of slow-path MQ: "
7226  "rc = 0x%x\n", rc);
7227  goto out_destroy_els_cq;
7228  }
7229  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7230  "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
7231  phba->sli4_hba.mbx_wq->queue_id,
7232  phba->sli4_hba.mbx_cq->queue_id);
7233 
7234  /* Set up slow-path ELS Work Queue */
7235  if (!phba->sli4_hba.els_wq) {
7236  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7237  "0536 Slow-path ELS WQ not allocated\n");
7238  rc = -ENOMEM;
7239  goto out_destroy_mbx_wq;
7240  }
7241  rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
7242  phba->sli4_hba.els_cq, LPFC_ELS);
7243  if (rc) {
7244  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7245  "0537 Failed setup of slow-path ELS WQ: "
7246  "rc = 0x%x\n", rc);
7247  goto out_destroy_mbx_wq;
7248  }
7249 
7250  /* Bind this WQ to the ELS ring */
7251  pring = &psli->ring[LPFC_ELS_RING];
7252  pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7253  phba->sli4_hba.els_cq->pring = pring;
7254 
7255  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7256  "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7257  phba->sli4_hba.els_wq->queue_id,
7258  phba->sli4_hba.els_cq->queue_id);
7259 
7260  /*
7261  * Create Receive Queue (RQ)
7262  */
7263  if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
7264  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7265  "0540 Receive Queue not allocated\n");
7266  rc = -ENOMEM;
7267  goto out_destroy_els_wq;
7268  }
7269 
7270  lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
7271  lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
7272 
7273  rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
7274  phba->sli4_hba.els_cq, LPFC_USOL);
7275  if (rc) {
7276  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7277  "0541 Failed setup of Receive Queue: "
7278  "rc = 0x%x\n", rc);
7279  goto out_destroy_fcp_wq;
7280  }
7281 
7282  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7283  "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
7284  "parent cq-id=%d\n",
7285  phba->sli4_hba.hdr_rq->queue_id,
7286  phba->sli4_hba.dat_rq->queue_id,
7287  phba->sli4_hba.els_cq->queue_id);
7288  return 0;
7289 
7290 out_destroy_els_wq:
7291  lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7292 out_destroy_mbx_wq:
7293  lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7294 out_destroy_els_cq:
7295  lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7296 out_destroy_mbx_cq:
7297  lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7298 out_destroy_fcp_wq:
7299  for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7300  lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7301 out_destroy_fcp_cq:
7302  for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7303  lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7304 out_destroy_hba_eq:
7305  for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7306  lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7307 out_error:
7308  return rc;
7309 }
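/* Note on the fast-path topology set up above: each FCP IO channel gets a
 * 1:1:1 chain of EQ -> CQ -> WQ (hba_eq[i] parents fcp_cq[i], which parents
 * fcp_wq[i]), while the slow-path MBX and ELS CQs both hang off hba_eq[0].
 * The interrupt coalescing budget is split evenly, cfg_fcp_imax divided by
 * cfg_fcp_io_channel per EQ. The error labels unwind in strict reverse
 * order of creation, each loop starting from the last successfully
 * created index.
 */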
7310 
7323 void
7324 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7325 {
7326  int fcp_qidx;
7327 
7328  /* Unset mailbox command work queue */
7329  lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7330  /* Unset ELS work queue */
7331  lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7332  /* Unset unsolicited receive queue */
7333  lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7334  /* Unset FCP work queue */
7335  if (phba->sli4_hba.fcp_wq) {
7336  for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7337  fcp_qidx++)
7338  lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7339  }
7340  /* Unset mailbox command complete queue */
7341  lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7342  /* Unset ELS complete queue */
7343  lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7344  /* Unset FCP response complete queue */
7345  if (phba->sli4_hba.fcp_cq) {
7346  for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7347  fcp_qidx++)
7348  lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7349  }
7350  /* Unset fast-path event queue */
7351  if (phba->sli4_hba.hba_eq) {
7352  for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7353  fcp_qidx++)
7354  lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7355  }
7356 }
7357 
7374 static int
7375 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7376 {
7377  struct lpfc_cq_event *cq_event;
7378  int i;
7379 
7380  for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7381  cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7382  if (!cq_event)
7383  goto out_pool_create_fail;
7384  list_add_tail(&cq_event->list,
7385  &phba->sli4_hba.sp_cqe_event_pool);
7386  }
7387  return 0;
7388 
7389 out_pool_create_fail:
7390  lpfc_sli4_cq_event_pool_destroy(phba);
7391  return -ENOMEM;
7392 }
7393 
7404 static void
7405 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7406 {
7407  struct lpfc_cq_event *cq_event, *next_cq_event;
7408 
7409  list_for_each_entry_safe(cq_event, next_cq_event,
7410  &phba->sli4_hba.sp_cqe_event_pool, list) {
7411  list_del(&cq_event->list);
7412  kfree(cq_event);
7413  }
7414 }
7415 
7426 struct lpfc_cq_event *
7427 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7428 {
7429  struct lpfc_cq_event *cq_event = NULL;
7430 
7431  list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7432  struct lpfc_cq_event, list);
7433  return cq_event;
7434 }
7435 
7446 struct lpfc_cq_event *
7447 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7448 {
7449  struct lpfc_cq_event *cq_event;
7450  unsigned long iflags;
7451 
7452  spin_lock_irqsave(&phba->hbalock, iflags);
7453  cq_event = __lpfc_sli4_cq_event_alloc(phba);
7454  spin_unlock_irqrestore(&phba->hbalock, iflags);
7455  return cq_event;
7456 }
7457 
7466 void
7467 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7468  struct lpfc_cq_event *cq_event)
7469 {
7470  list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7471 }
7472 
7481 void
7482 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7483  struct lpfc_cq_event *cq_event)
7484 {
7485  unsigned long iflags;
7486  spin_lock_irqsave(&phba->hbalock, iflags);
7487  __lpfc_sli4_cq_event_release(phba, cq_event);
7488  spin_unlock_irqrestore(&phba->hbalock, iflags);
7489 }
7490 
7498 static void
7499 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7500 {
7501  LIST_HEAD(cqelist);
7502  struct lpfc_cq_event *cqe;
7503  unsigned long iflags;
7504 
7505  /* Retrieve all the pending WCQEs from pending WCQE lists */
7506  spin_lock_irqsave(&phba->hbalock, iflags);
7507  /* Pending FCP XRI abort events */
7508  list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7509  &cqelist);
7510  /* Pending ELS XRI abort events */
7511  list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7512  &cqelist);
7513  /* Pending async events */
7514  list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7515  &cqelist);
7516  spin_unlock_irqrestore(&phba->hbalock, iflags);
7517 
7518  while (!list_empty(&cqelist)) {
7519  list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7520  lpfc_sli4_cq_event_release(phba, cqe);
7521  }
7522 }
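/* Note: the __lpfc_sli4_cq_event_alloc()/__lpfc_sli4_cq_event_release()
 * variants above assume the caller already holds phba->hbalock; the
 * non-underscore wrappers are identical except that they take and release
 * hbalock with spin_lock_irqsave()/spin_unlock_irqrestore() around the
 * pool-list operation themselves.
 */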
7523 
7536 int
7537 lpfc_pci_function_reset(struct lpfc_hba *phba)
7538 {
7539  LPFC_MBOXQ_t *mboxq;
7540  uint32_t rc = 0, if_type;
7541  uint32_t shdr_status, shdr_add_status;
7542  uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7543  union lpfc_sli4_cfg_shdr *shdr;
7544  struct lpfc_register reg_data;
7545  uint16_t devid;
7546 
7547  if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7548  switch (if_type) {
7549  case LPFC_SLI_INTF_IF_TYPE_0:
7550  mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7551  GFP_KERNEL);
7552  if (!mboxq) {
7553  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7554  "0494 Unable to allocate memory for "
7555  "issuing SLI_FUNCTION_RESET mailbox "
7556  "command\n");
7557  return -ENOMEM;
7558  }
7559 
7560  /* Setup PCI function reset mailbox-ioctl command */
7561  lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7562  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7563  LPFC_SLI4_MBX_EMBED);
7564  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7565  shdr = (union lpfc_sli4_cfg_shdr *)
7566  &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7567  shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7568  shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7569  &shdr->response);
7570  if (rc != MBX_TIMEOUT)
7571  mempool_free(mboxq, phba->mbox_mem_pool);
7572  if (shdr_status || shdr_add_status || rc) {
7573  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7574  "0495 SLI_FUNCTION_RESET mailbox "
7575  "failed with status x%x add_status x%x,"
7576  " mbx status x%x\n",
7577  shdr_status, shdr_add_status, rc);
7578  rc = -ENXIO;
7579  }
7580  break;
7581  case LPFC_SLI_INTF_IF_TYPE_2:
7582  for (num_resets = 0;
7583  num_resets < MAX_IF_TYPE_2_RESETS;
7584  num_resets++) {
7585  reg_data.word0 = 0;
7586  bf_set(lpfc_sliport_ctrl_end, &reg_data,
7587  LPFC_SLIPORT_LITTLE_ENDIAN);
7588  bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7589  LPFC_SLIPORT_INIT_PORT);
7590  writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7591  CTRLregaddr);
7592  /* flush */
7593  pci_read_config_word(phba->pcidev,
7594  PCI_DEVICE_ID, &devid);
7595  /*
7596  * Poll the Port Status Register and wait for RDY for
7597  * up to 10 seconds. If the port doesn't respond, treat
7598  * it as an error. If the port responds with RN, start
7599  * the loop again.
7600  */
7601  for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7602  msleep(10);
7603  if (lpfc_readl(phba->sli4_hba.u.if_type2.
7604  STATUSregaddr, &reg_data.word0)) {
7605  rc = -ENODEV;
7606  goto out;
7607  }
7608  if (bf_get(lpfc_sliport_status_rn, &reg_data))
7609  reset_again++;
7610  if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7611  break;
7612  }
7613 
7614  /*
7615  * If the port responds to the init request with
7616  * reset needed, delay for a bit and restart the loop.
7617  */
7618  if (reset_again && (rdy_chk < 1000)) {
7619  msleep(10);
7620  reset_again = 0;
7621  continue;
7622  }
7623 
7624  /* Detect any port errors. */
7625  if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7626  (rdy_chk >= 1000)) {
7627  phba->work_status[0] = readl(
7628  phba->sli4_hba.u.if_type2.ERR1regaddr);
7629  phba->work_status[1] = readl(
7630  phba->sli4_hba.u.if_type2.ERR2regaddr);
7631  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7632  "2890 Port error detected during port "
7633  "reset(%d): wait_tmo:%d ms, "
7634  "port status reg 0x%x, "
7635  "error 1=0x%x, error 2=0x%x\n",
7636  num_resets, rdy_chk*10,
7637  reg_data.word0,
7638  phba->work_status[0],
7639  phba->work_status[1]);
7640  rc = -ENODEV;
7641  }
7642 
7643  /*
7644  * Terminate the outer loop provided the Port indicated
7645  * ready within 10 seconds.
7646  */
7647  if (rdy_chk < 1000)
7648  break;
7649  }
7650  /* delay driver action following IF_TYPE_2 function reset */
7651  msleep(100);
7652  break;
7653  case LPFC_SLI_INTF_IF_TYPE_1:
7654  default:
7655  break;
7656  }
7657 
7658 out:
7659  /* Catch the not-ready port failure after a port reset. */
7660  if (num_resets >= MAX_IF_TYPE_2_RESETS)
7661  rc = -ENODEV;
7662 
7663  return rc;
7664 }
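/* Note: for IF_TYPE_2 ports the reset handshake above writes INIT_PORT to
 * the control register, then polls the port status register every 10 ms up
 * to 1000 times (the 10 second window mentioned in the comments). A status
 * of RN (reset needed) causes the reset to be reissued, and the whole
 * sequence is attempted at most MAX_IF_TYPE_2_RESETS times before failing
 * with -ENODEV. The PCI config read after the writel() is only a flush.
 */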
7665 
7676 static int
7677 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7678 {
7679  LPFC_MBOXQ_t *mboxq;
7680  int length, cmdsent;
7681  uint32_t mbox_tmo;
7682  uint32_t rc = 0;
7683  uint32_t shdr_status, shdr_add_status;
7684  union lpfc_sli4_cfg_shdr *shdr;
7685 
7686  if (cnt == 0) {
7687  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7688  "2518 Requested to send 0 NOP mailbox cmd\n");
7689  return cnt;
7690  }
7691 
7692  mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7693  if (!mboxq) {
7694  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7695  "2519 Unable to allocate memory for issuing "
7696  "NOP mailbox command\n");
7697  return 0;
7698  }
7699 
7700  /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7701  length = (sizeof(struct lpfc_mbx_nop) -
7702  sizeof(struct lpfc_sli4_cfg_mhdr));
7703 
7704  for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7705  lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7706  LPFC_MBOX_OPCODE_NOP, length,
7707  LPFC_SLI4_MBX_EMBED);
7708  if (!phba->sli4_hba.intr_enable)
7709  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7710  else {
7711  mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7712  rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7713  }
7714  if (rc == MBX_TIMEOUT)
7715  break;
7716  /* Check return status */
7717  shdr = (union lpfc_sli4_cfg_shdr *)
7718  &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7719  shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7720  shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7721  &shdr->response);
7722  if (shdr_status || shdr_add_status || rc) {
7723  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7724  "2520 NOP mailbox command failed "
7725  "status x%x add_status x%x mbx "
7726  "status x%x\n", shdr_status,
7727  shdr_add_status, rc);
7728  break;
7729  }
7730  }
7731 
7732  if (rc != MBX_TIMEOUT)
7733  mempool_free(mboxq, phba->mbox_mem_pool);
7734 
7735  return cmdsent;
7736 }
7737 
7749 static int
7750 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7751 {
7752  struct pci_dev *pdev;
7753  unsigned long bar0map_len, bar1map_len, bar2map_len;
7754  int error = -ENODEV;
7755  uint32_t if_type;
7756 
7757  /* Obtain PCI device reference */
7758  if (!phba->pcidev)
7759  return error;
7760  else
7761  pdev = phba->pcidev;
7762 
7763  /* Set the device DMA mask size */
7764  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7765  || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
7766  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7767  || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
7768  return error;
7769  }
7770  }
7771 
7772  /*
7773  * The BARs and register set definitions and offset locations are
7774  * dependent on the if_type.
7775  */
7776  if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7777  &phba->sli4_hba.sli_intf.word0)) {
7778  return error;
7779  }
7780 
7781  /* There is no SLI3 failback for SLI4 devices. */
7782  if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7783  LPFC_SLI_INTF_VALID) {
7784  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7785  "2894 SLI_INTF reg contents invalid "
7786  "sli_intf reg 0x%x\n",
7787  phba->sli4_hba.sli_intf.word0);
7788  return error;
7789  }
7790 
7791  if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7792  /*
7793  * Get the bus address of SLI4 device Bar regions and the
7794  * number of bytes required by each mapping. The mapping of the
7795  * particular PCI BARs regions is dependent on the type of
7796  * SLI4 device.
7797  */
7798  if (pci_resource_start(pdev, 0)) {
7799  phba->pci_bar0_map = pci_resource_start(pdev, 0);
7800  bar0map_len = pci_resource_len(pdev, 0);
7801 
7802  /*
7803  * Map SLI4 PCI Config Space Register base to a kernel virtual
7804  * addr
7805  */
7806  phba->sli4_hba.conf_regs_memmap_p =
7807  ioremap(phba->pci_bar0_map, bar0map_len);
7808  if (!phba->sli4_hba.conf_regs_memmap_p) {
7809  dev_printk(KERN_ERR, &pdev->dev,
7810  "ioremap failed for SLI4 PCI config "
7811  "registers.\n");
7812  goto out;
7813  }
7814  /* Set up BAR0 PCI config space register memory map */
7815  lpfc_sli4_bar0_register_memmap(phba, if_type);
7816  } else {
7817  phba->pci_bar0_map = pci_resource_start(pdev, 1);
7818  bar0map_len = pci_resource_len(pdev, 1);
7819  if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7820  dev_printk(KERN_ERR, &pdev->dev,
7821  "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7822  goto out;
7823  }
7824  phba->sli4_hba.conf_regs_memmap_p =
7825  ioremap(phba->pci_bar0_map, bar0map_len);
7826  if (!phba->sli4_hba.conf_regs_memmap_p) {
7827  dev_printk(KERN_ERR, &pdev->dev,
7828  "ioremap failed for SLI4 PCI config "
7829  "registers.\n");
7830  goto out;
7831  }
7832  lpfc_sli4_bar0_register_memmap(phba, if_type);
7833  }
7834 
7835  if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7836  (pci_resource_start(pdev, 2))) {
7837  /*
7838  * Map SLI4 if type 0 HBA Control Register base to a kernel
7839  * virtual address and setup the registers.
7840  */
7841  phba->pci_bar1_map = pci_resource_start(pdev, 2);
7842  bar1map_len = pci_resource_len(pdev, 2);
7843  phba->sli4_hba.ctrl_regs_memmap_p =
7844  ioremap(phba->pci_bar1_map, bar1map_len);
7845  if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7846  dev_printk(KERN_ERR, &pdev->dev,
7847  "ioremap failed for SLI4 HBA control registers.\n");
7848  goto out_iounmap_conf;
7849  }
7850  lpfc_sli4_bar1_register_memmap(phba);
7851  }
7852 
7853  if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7854  (pci_resource_start(pdev, 4))) {
7855  /*
7856  * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7857  * virtual address and setup the registers.
7858  */
7859  phba->pci_bar2_map = pci_resource_start(pdev, 4);
7860  bar2map_len = pci_resource_len(pdev, 4);
7861  phba->sli4_hba.drbl_regs_memmap_p =
7862  ioremap(phba->pci_bar2_map, bar2map_len);
7863  if (!phba->sli4_hba.drbl_regs_memmap_p) {
7864  dev_printk(KERN_ERR, &pdev->dev,
7865  "ioremap failed for SLI4 HBA doorbell registers.\n");
7866  goto out_iounmap_ctrl;
7867  }
7868  error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7869  if (error)
7870  goto out_iounmap_all;
7871  }
7872 
7873  return 0;
7874 
7875 out_iounmap_all:
7876  iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7877 out_iounmap_ctrl:
7878  iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7879 out_iounmap_conf:
7880  iounmap(phba->sli4_hba.conf_regs_memmap_p);
7881 out:
7882  return error;
7883 }
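/* Note: the BAR usage decoded above is if_type dependent. IF_TYPE_0 parts
 * expose the SLI4 config registers through BAR0, the HBA control registers
 * through PCI resource 2, and the doorbells through PCI resource 4;
 * IF_TYPE_2 parts map everything through the single config-space BAR,
 * which is why a missing BAR0 mapping is fatal for them. The iounmap
 * ladder at the bottom unwinds only the mappings made so far.
 */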
7884 
7892 static void
7893 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7894 {
7895  uint32_t if_type;
7896  if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7897 
7898  switch (if_type) {
7899  case LPFC_SLI_INTF_IF_TYPE_0:
7900  iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7901  iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7902  iounmap(phba->sli4_hba.conf_regs_memmap_p);
7903  break;
7904  case LPFC_SLI_INTF_IF_TYPE_2:
7905  iounmap(phba->sli4_hba.conf_regs_memmap_p);
7906  break;
7907  case LPFC_SLI_INTF_IF_TYPE_1:
7908  default:
7909  dev_printk(KERN_ERR, &phba->pcidev->dev,
7910  "FATAL - unsupported SLI4 interface type - %d\n",
7911  if_type);
7912  break;
7913  }
7914 }
7915 
7936 static int
7937 lpfc_sli_enable_msix(struct lpfc_hba *phba)
7938 {
7939  int rc, i;
7940  LPFC_MBOXQ_t *pmb;
7941 
7942  /* Set up MSI-X multi-message vectors */
7943  for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7944  phba->msix_entries[i].entry = i;
7945 
7946  /* Configure MSI-X capability structure */
7947  rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7948  ARRAY_SIZE(phba->msix_entries));
7949  if (rc) {
7950  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7951  "0420 PCI enable MSI-X failed (%d)\n", rc);
7952  goto msi_fail_out;
7953  }
7954  for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7955  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7956  "0477 MSI-X entry[%d]: vector=x%x "
7957  "message=%d\n", i,
7958  phba->msix_entries[i].vector,
7959  phba->msix_entries[i].entry);
7960  /*
7961  * Assign MSI-X vectors to interrupt handlers
7962  */
7963 
7964  /* vector-0 is associated to slow-path handler */
7965  rc = request_irq(phba->msix_entries[0].vector,
7966  &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7967  LPFC_SP_DRIVER_HANDLER_NAME, phba);
7968  if (rc) {
7969  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7970  "0421 MSI-X slow-path request_irq failed "
7971  "(%d)\n", rc);
7972  goto msi_fail_out;
7973  }
7974 
7975  /* vector-1 is associated to fast-path handler */
7976  rc = request_irq(phba->msix_entries[1].vector,
7977  &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7978  LPFC_FP_DRIVER_HANDLER_NAME, phba);
7979 
7980  if (rc) {
7981  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7982  "0429 MSI-X fast-path request_irq failed "
7983  "(%d)\n", rc);
7984  goto irq_fail_out;
7985  }
7986 
7987  /*
7988  * Configure HBA MSI-X attention conditions to messages
7989  */
7990  pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7991 
7992  if (!pmb) {
7993  rc = -ENOMEM;
7994  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7995  "0474 Unable to allocate memory for issuing "
7996  "MBOX_CONFIG_MSI command\n");
7997  goto mem_fail_out;
7998  }
7999  rc = lpfc_config_msi(phba, pmb);
8000  if (rc)
8001  goto mbx_fail_out;
8002  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8003  if (rc != MBX_SUCCESS) {
8004  lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8005  "0351 Config MSI mailbox command failed, "
8006  "mbxCmd x%x, mbxStatus x%x\n",
8007  pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8008  goto mbx_fail_out;
8009  }
8010 
8011  /* Free memory allocated for mailbox command */
8012  mempool_free(pmb, phba->mbox_mem_pool);
8013  return rc;
8014 
8015 mbx_fail_out:
8016  /* Free memory allocated for mailbox command */
8017  mempool_free(pmb, phba->mbox_mem_pool);
8018 
8019 mem_fail_out:
8020  /* free the irq already requested */
8021  free_irq(phba->msix_entries[1].vector, phba);
8022 
8023 irq_fail_out:
8024  /* free the irq already requested */
8025  free_irq(phba->msix_entries[0].vector, phba);
8026 
8027 msi_fail_out:
8028  /* Unconfigure MSI-X capability structure */
8029  pci_disable_msix(phba->pcidev);
8030  return rc;
8031 }
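/* Note: SLI-3 MSI-X uses a two-message scheme: vector 0 is wired to the
 * slow-path (SP) interrupt handler and vector 1 to the fast-path (FP)
 * handler, after which the MBOX_CONFIG_MSI command tells the HBA which
 * attention conditions map to which message. The stacked error labels
 * (mbx_fail_out .. msi_fail_out) release resources in reverse order of
 * acquisition.
 */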
8032 
8040 static void
8041 lpfc_sli_disable_msix(struct lpfc_hba *phba)
8042 {
8043  int i;
8044 
8045  /* Free up MSI-X multi-message vectors */
8046  for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8047  free_irq(phba->msix_entries[i].vector, phba);
8048  /* Disable MSI-X */
8049  pci_disable_msix(phba->pcidev);
8050 
8051  return;
8052 }
8053 
8068 static int
8069 lpfc_sli_enable_msi(struct lpfc_hba *phba)
8070 {
8071  int rc;
8072 
8073  rc = pci_enable_msi(phba->pcidev);
8074  if (!rc)
8075  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8076  "0462 PCI enable MSI mode success.\n");
8077  else {
8078  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8079  "0471 PCI enable MSI mode failed (%d)\n", rc);
8080  return rc;
8081  }
8082 
8083  rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8084  IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8085  if (rc) {
8086  pci_disable_msi(phba->pcidev);
8087  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8088  "0478 MSI request_irq failed (%d)\n", rc);
8089  }
8090  return rc;
8091 }
8092 
8103 static void
8104 lpfc_sli_disable_msi(struct lpfc_hba *phba)
8105 {
8106  free_irq(phba->pcidev->irq, phba);
8107  pci_disable_msi(phba->pcidev);
8108  return;
8109 }
8110 
8127 static uint32_t
8128 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8129 {
8130  uint32_t intr_mode = LPFC_INTR_ERROR;
8131  int retval;
8132 
8133  if (cfg_mode == 2) {
8134  /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
8135  retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8136  if (!retval) {
8137  /* Now, try to enable MSI-X interrupt mode */
8138  retval = lpfc_sli_enable_msix(phba);
8139  if (!retval) {
8140  /* Indicate initialization to MSI-X mode */
8141  phba->intr_type = MSIX;
8142  intr_mode = 2;
8143  }
8144  }
8145  }
8146 
8147  /* Fallback to MSI if MSI-X initialization failed */
8148  if (cfg_mode >= 1 && phba->intr_type == NONE) {
8149  retval = lpfc_sli_enable_msi(phba);
8150  if (!retval) {
8151  /* Indicate initialization to MSI mode */
8152  phba->intr_type = MSI;
8153  intr_mode = 1;
8154  }
8155  }
8156 
8157  /* Fallback to INTx if both MSI-X/MSI initialization failed */
8158  if (phba->intr_type == NONE) {
8159  retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8160  IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8161  if (!retval) {
8162  /* Indicate initialization to INTx mode */
8163  phba->intr_type = INTx;
8164  intr_mode = 0;
8165  }
8166  }
8167  return intr_mode;
8168 }
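/* Note: the return value doubles as the active interrupt mode: 2 = MSI-X,
 * 1 = MSI, 0 = INTx, LPFC_INTR_ERROR if nothing could be enabled. cfg_mode
 * gates how far up the chain the driver starts (2 tries MSI-X first,
 * 1 starts at MSI, 0 goes straight to INTx), and each failure falls
 * through to the next weaker mode.
 */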
8169 
8179 static void
8180 lpfc_sli_disable_intr(struct lpfc_hba *phba)
8181 {
8182  /* Disable the currently initialized interrupt mode */
8183  if (phba->intr_type == MSIX)
8184  lpfc_sli_disable_msix(phba);
8185  else if (phba->intr_type == MSI)
8186  lpfc_sli_disable_msi(phba);
8187  else if (phba->intr_type == INTx)
8188  free_irq(phba->pcidev->irq, phba);
8189 
8190  /* Reset interrupt management states */
8191  phba->intr_type = NONE;
8192  phba->sli.slistat.sli_intr = 0;
8193 
8194  return;
8195 }
8196 
8217 static int
8218 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8219 {
8220  int vectors, rc, index;
8221 
8222  /* Set up MSI-X multi-message vectors */
8223  for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8224  phba->sli4_hba.msix_entries[index].entry = index;
8225 
8226  /* Configure MSI-X capability structure */
8227  vectors = phba->cfg_fcp_io_channel;
8228 enable_msix_vectors:
8229  rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8230  vectors);
8231  if (rc > 1) {
8232  vectors = rc;
8233  goto enable_msix_vectors;
8234  } else if (rc) {
8235  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8236  "0484 PCI enable MSI-X failed (%d)\n", rc);
8237  goto msi_fail_out;
8238  }
8239 
8240  /* Log MSI-X vector assignment */
8241  for (index = 0; index < vectors; index++)
8242  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8243  "0489 MSI-X entry[%d]: vector=x%x "
8244  "message=%d\n", index,
8245  phba->sli4_hba.msix_entries[index].vector,
8246  phba->sli4_hba.msix_entries[index].entry);
8247 
8248  /*
8249  * Assign MSI-X vectors to interrupt handlers
8250  */
8251  for (index = 0; index < vectors; index++) {
8252  memset(&phba->sli4_hba.handler_name[index], 0, 16);
8253  sprintf((char *)&phba->sli4_hba.handler_name[index],
8254  LPFC_DRIVER_HANDLER_NAME"%d", index);
8255 
8256  phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8257  phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8258  atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8259  rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8260  &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8261  (char *)&phba->sli4_hba.handler_name[index],
8262  &phba->sli4_hba.fcp_eq_hdl[index]);
8263  if (rc) {
8264  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8265  "0486 MSI-X fast-path (%d) "
8266  "request_irq failed (%d)\n", index, rc);
8267  goto cfg_fail_out;
8268  }
8269  }
8270 
8271  if (vectors != phba->cfg_fcp_io_channel) {
8272  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8273  "3238 Reducing IO channels to match number of "
8274  "MSI-X vectors, requested %d got %d\n",
8275  phba->cfg_fcp_io_channel, vectors);
8276  phba->cfg_fcp_io_channel = vectors;
8277  }
8278  return rc;
8279 
8280 cfg_fail_out:
8281  /* free the irq already requested */
8282  for (--index; index >= 0; index--)
8283  free_irq(phba->sli4_hba.msix_entries[index].vector,
8284  &phba->sli4_hba.fcp_eq_hdl[index]);
8285 
8286 msi_fail_out:
8287  /* Unconfigure MSI-X capability structure */
8288  pci_disable_msix(phba->pcidev);
8289  return rc;
8290 }
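/* Note: pci_enable_msix() returns a positive count when fewer vectors are
 * available than requested; the enable_msix_vectors loop above retries
 * with that smaller count until it either succeeds (rc == 0) or fails
 * outright. When fewer vectors than cfg_fcp_io_channel are granted, the
 * IO channel count is trimmed to match so that every remaining channel
 * keeps a dedicated EQ vector.
 */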
8291 
8299 static void
8300 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8301 {
8302  int index;
8303 
8304  /* Free up MSI-X multi-message vectors */
8305  for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8306  free_irq(phba->sli4_hba.msix_entries[index].vector,
8307  &phba->sli4_hba.fcp_eq_hdl[index]);
8308 
8309  /* Disable MSI-X */
8310  pci_disable_msix(phba->pcidev);
8311 
8312  return;
8313 }
8314 
8329 static int
8330 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8331 {
8332  int rc, index;
8333 
8334  rc = pci_enable_msi(phba->pcidev);
8335  if (!rc)
8336  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8337  "0487 PCI enable MSI mode success.\n");
8338  else {
8339  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8340  "0488 PCI enable MSI mode failed (%d)\n", rc);
8341  return rc;
8342  }
8343 
8344  rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8345  IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8346  if (rc) {
8347  pci_disable_msi(phba->pcidev);
8348  lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8349  "0490 MSI request_irq failed (%d)\n", rc);
8350  return rc;
8351  }
8352 
8353  for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8354  phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8355  phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8356  }
8357 
8358  return 0;
8359 }
8360 
8371 static void
8372 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8373 {
8374  free_irq(phba->pcidev->irq, phba);
8375  pci_disable_msi(phba->pcidev);
8376  return;
8377 }
8378 
8395 static uint32_t
8396 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8397 {
8398  uint32_t intr_mode = LPFC_INTR_ERROR;
8399  int retval, index;
8400 
8401  if (cfg_mode == 2) {
8402  /* Preparation before conf_msi mbox cmd */
8403  retval = 0;
8404  if (!retval) {
8405  /* Now, try to enable MSI-X interrupt mode */
8406  retval = lpfc_sli4_enable_msix(phba);
8407  if (!retval) {
8408  /* Indicate initialization to MSI-X mode */
8409  phba->intr_type = MSIX;
8410  intr_mode = 2;
8411  }
8412  }
8413  }
8414 
8415  /* Fallback to MSI if MSI-X initialization failed */
8416  if (cfg_mode >= 1 && phba->intr_type == NONE) {
8417  retval = lpfc_sli4_enable_msi(phba);
8418  if (!retval) {
8419  /* Indicate initialization to MSI mode */
8420  phba->intr_type = MSI;
8421  intr_mode = 1;
8422  }
8423  }
8424 
8425  /* Fallback to INTx if both MSI-X/MSI initialization failed */
8426  if (phba->intr_type == NONE) {
8427  retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8428  IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8429  if (!retval) {
8430  /* Indicate initialization to INTx mode */
8431  phba->intr_type = INTx;
8432  intr_mode = 0;
8433  for (index = 0; index < phba->cfg_fcp_io_channel;
8434  index++) {
8435  phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8436  phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8437  atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8438  fcp_eq_in_use, 1);
8439  }
8440  }
8441  }
8442  return intr_mode;
8443 }
8444 
8454 static void
8455 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8456 {
8457  /* Disable the currently initialized interrupt mode */
8458  if (phba->intr_type == MSIX)
8459  lpfc_sli4_disable_msix(phba);
8460  else if (phba->intr_type == MSI)
8461  lpfc_sli4_disable_msi(phba);
8462  else if (phba->intr_type == INTx)
8463  free_irq(phba->pcidev->irq, phba);
8464 
8465  /* Reset interrupt management states */
8466  phba->intr_type = NONE;
8467  phba->sli.slistat.sli_intr = 0;
8468 
8469  return;
8470 }
8471 
8479 static void
8480 lpfc_unset_hba(struct lpfc_hba *phba)
8481 {
8482  struct lpfc_vport *vport = phba->pport;
8483  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8484 
8485  spin_lock_irq(shost->host_lock);
8486  vport->load_flag |= FC_UNLOADING;
8487  spin_unlock_irq(shost->host_lock);
8488 
8489  kfree(phba->vpi_bmask);
8490  kfree(phba->vpi_ids);
8491 
8492  lpfc_stop_hba_timers(phba);
8493 
8494  phba->pport->work_port_events = 0;
8495 
8496  lpfc_sli_hba_down(phba);
8497 
8498  lpfc_sli_brdrestart(phba);
8499 
8500  lpfc_sli_disable_intr(phba);
8501 
8502  return;
8503 }
8504 
8512 static void
8513 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8514 {
8515  struct lpfc_vport *vport = phba->pport;
8516  struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8517 
8518  spin_lock_irq(shost->host_lock);
8519  vport->load_flag |= FC_UNLOADING;
8520  spin_unlock_irq(shost->host_lock);
8521 
8522  phba->pport->work_port_events = 0;
8523 
8524  /* Stop the SLI4 device port */
8525  lpfc_stop_port(phba);
8526 
8527  lpfc_sli4_disable_intr(phba);
8528 
8529  /* Reset SLI4 HBA FCoE function */
8530  lpfc_pci_function_reset(phba);
8531  lpfc_sli4_pci_mem_unset(phba);
8532 
8533  return;
8534 }
8535 
8549 static void
8550 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8551 {
8552  int wait_time = 0;
8553  int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8554  int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8555 
8556  while (!fcp_xri_cmpl || !els_xri_cmpl) {
8557  if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8558  if (!fcp_xri_cmpl)
8559  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8560  "2877 FCP XRI exchange busy "
8561  "wait time: %d seconds.\n",
8562  wait_time/1000);
8563  if (!els_xri_cmpl)
8564  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8565  "2878 ELS XRI exchange busy "
8566  "wait time: %d seconds.\n",
8567  wait_time/1000);
8568  msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8569  wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8570  } else {
8571  msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8572  wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8573  }
8574  fcp_xri_cmpl =
8575  list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8576  els_xri_cmpl =
8577  list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8578  }
8579 }
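/* Note: this wait is two-phase. Until LPFC_XRI_EXCH_BUSY_WAIT_TMO expires
 * the aborted-exchange lists are re-checked every
 * LPFC_XRI_EXCH_BUSY_WAIT_T1 ms; past the timeout it keeps waiting at the
 * slower T2 interval but now logs which XRI lists (FCP and/or ELS) are
 * still busy. The loop exits only once both lists drain.
 */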
8580 
8591 static void
8592 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8593 {
8594  int wait_cnt = 0;
8595  LPFC_MBOXQ_t *mboxq;
8596  struct pci_dev *pdev = phba->pcidev;
8597 
8598  lpfc_stop_hba_timers(phba);
8599  phba->sli4_hba.intr_enable = 0;
8600 
8601  /*
8602  * Gracefully wait out the potential current outstanding asynchronous
8603  * mailbox command.
8604  */
8605 
8606  /* First, block any pending async mailbox command from being posted */
8607  spin_lock_irq(&phba->hbalock);
8608  phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8609  spin_unlock_irq(&phba->hbalock);
8610  /* Now, trying to wait it out if we can */
8611  while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8612  msleep(10);
8613  if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8614  break;
8615  }
8616  /* Forcefully release the outstanding mailbox command if timed out */
8617  if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8618  spin_lock_irq(&phba->hbalock);
8619  mboxq = phba->sli.mbox_active;
8620  mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8621  __lpfc_mbox_cmpl_put(phba, mboxq);
8622  phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8623  phba->sli.mbox_active = NULL;
8624  spin_unlock_irq(&phba->hbalock);
8625  }
8626 
8627  /* Abort all iocbs associated with the hba */
8628  lpfc_sli_hba_iocb_abort(phba);
8629 
8630  /* Wait for completion of device XRI exchange busy */
8631  lpfc_sli4_xri_exchange_busy_wait(phba);
8632 
8633  /* Disable PCI subsystem interrupt */
8634  lpfc_sli4_disable_intr(phba);
8635 
8636  /* Disable SR-IOV if enabled */
8637  if (phba->cfg_sriov_nr_virtfn)
8638  pci_disable_sriov(pdev);
8639 
8640  /* Stop kthread signal shall trigger work_done one more time */
8641  kthread_stop(phba->worker_thread);
8642 
8643  /* Reset SLI4 HBA FCoE function */
8644  lpfc_pci_function_reset(phba);
8645  lpfc_sli4_pci_mem_unset(phba);
8646 
8647  /* Stop the SLI4 device port */
8648  phba->pport->work_port_events = 0;
8649 }
8650 
8663 int
8664 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8665 {
8666  int rc;
8667  struct lpfc_mqe *mqe;
8668  struct lpfc_pc_sli4_params *sli4_params;
8669  uint32_t mbox_tmo;
8670 
8671  rc = 0;
8672  mqe = &mboxq->u.mqe;
8673 
8674  /* Read the port's SLI4 Parameters port capabilities */
8675  lpfc_pc_sli4_params(mboxq);
8676  if (!phba->sli4_hba.intr_enable)
8677  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8678  else {
8679  mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8680  rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8681  }
8682 
8683  if (unlikely(rc))
8684  return 1;
8685 
8686  sli4_params = &phba->sli4_hba.pc_sli4_params;
8687  sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8688  sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8689  sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8690  sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8691  &mqe->un.sli4_params);
8692  sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8693  &mqe->un.sli4_params);
8694  sli4_params->proto_types = mqe->un.sli4_params.word3;
8695  sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8696  sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8697  sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8698  sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8699  sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8700  sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8701  sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8702  sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8703  sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8704  sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8705  sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8706  sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8707  sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8708  sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8709  sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8710  sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8711  sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8712  sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8713  sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8714  sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8715 
8716  /* Make sure that sge_supp_len can be handled by the driver */
8717  if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8718  sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8719 
8720  return rc;
8721 }
8722 
8735 int
8736 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8737 {
8738  int rc;
8739  struct lpfc_mqe *mqe = &mboxq->u.mqe;
8740  struct lpfc_pc_sli4_params *sli4_params;
8741  uint32_t mbox_tmo;
8742  int length;
8743  struct lpfc_sli4_parameters *mbx_sli4_parameters;
8744 
8745  /*
8746  * By default, the driver assumes the SLI4 port requires RPI
8747  * header postings. The SLI4_PARAM response will correct this
8748  * assumption.
8749  */
8750  phba->sli4_hba.rpi_hdrs_in_use = 1;
8751 
8752  /* Read the port's SLI4 Config Parameters */
8753  length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8754  sizeof(struct lpfc_sli4_cfg_mhdr));
8755  lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8756  LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8757  length, LPFC_SLI4_MBX_EMBED);
8758  if (!phba->sli4_hba.intr_enable)
8759  rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8760  else {
8761  mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8762  rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8763  }
8764  if (unlikely(rc))
8765  return rc;
8766  sli4_params = &phba->sli4_hba.pc_sli4_params;
8767  mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8768  sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8769  sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8770  sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8771  sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8772  mbx_sli4_parameters);
8773  sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8774  mbx_sli4_parameters);
8775  if (bf_get(cfg_phwq, mbx_sli4_parameters))
8776  phba->sli.sli_flag |= LPFC_SLI_PHWQ_ENABLED;
8777  else
8778  phba->sli.sli_flag &= ~LPFC_SLI_PHWQ_ENABLED;
8779  sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8780  sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8781  sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8782  sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8783  sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8784  sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8785  sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8786  mbx_sli4_parameters);
8787  sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8788  mbx_sli4_parameters);
8789  phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8790  phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8791 
8792  /* Make sure that sge_supp_len can be handled by the driver */
8793  if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8794  sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8795 
8796  return 0;
8797 }
8798 
8816 static int __devinit
8817 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8818 {
8819  struct lpfc_hba *phba;
8820  struct lpfc_vport *vport = NULL;
8821  struct Scsi_Host *shost = NULL;
8822  int error;
8823  uint32_t cfg_mode, intr_mode;
8824 
8825  /* Allocate memory for HBA structure */
8826  phba = lpfc_hba_alloc(pdev);
8827  if (!phba)
8828  return -ENOMEM;
8829 
8830  /* Perform generic PCI device enabling operation */
8831  error = lpfc_enable_pci_dev(phba);
8832  if (error)
8833  goto out_free_phba;
8834 
8835  /* Set up SLI API function jump table for PCI-device group-0 HBAs */
8836  error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8837  if (error)
8838  goto out_disable_pci_dev;
8839 
8840  /* Set up SLI-3 specific device PCI memory space */
8841  error = lpfc_sli_pci_mem_setup(phba);
8842  if (error) {
8843  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8844  "1402 Failed to set up pci memory space.\n");
8845  goto out_disable_pci_dev;
8846  }
8847 
8848  /* Set up phase-1 common device driver resources */
8849  error = lpfc_setup_driver_resource_phase1(phba);
8850  if (error) {
8851  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8852  "1403 Failed to set up driver resource.\n");
8853  goto out_unset_pci_mem_s3;
8854  }
8855 
8856  /* Set up SLI-3 specific device driver resources */
8857  error = lpfc_sli_driver_resource_setup(phba);
8858  if (error) {
8859  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8860  "1404 Failed to set up driver resource.\n");
8861  goto out_unset_pci_mem_s3;
8862  }
8863 
8864  /* Initialize and populate the iocb list per host */
8865  error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8866  if (error) {
8867  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8868  "1405 Failed to initialize iocb list.\n");
8869  goto out_unset_driver_resource_s3;
8870  }
8871 
8872  /* Set up common device driver resources */
8873  error = lpfc_setup_driver_resource_phase2(phba);
8874  if (error) {
8875  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8876  "1406 Failed to set up driver resource.\n");
8877  goto out_free_iocb_list;
8878  }
8879 
8880  /* Get the default values for Model Name and Description */
8881  lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8882 
8883  /* Create SCSI host to the physical port */
8884  error = lpfc_create_shost(phba);
8885  if (error) {
8886  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8887  "1407 Failed to create scsi host.\n");
8888  goto out_unset_driver_resource;
8889  }
8890 
8891  /* Configure sysfs attributes */
8892  vport = phba->pport;
8893  error = lpfc_alloc_sysfs_attr(vport);
8894  if (error) {
8895  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8896  "1476 Failed to allocate sysfs attr\n");
8897  goto out_destroy_shost;
8898  }
8899 
8900  shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8901  /* Now, trying to enable interrupt and bring up the device */
8902  cfg_mode = phba->cfg_use_msi;
8903  while (true) {
8904  /* Put device to a known state before enabling interrupt */
8905  lpfc_stop_port(phba);
8906  /* Configure and enable interrupt */
8907  intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8908  if (intr_mode == LPFC_INTR_ERROR) {
8909  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8910  "0431 Failed to enable interrupt.\n");
8911  error = -ENODEV;
8912  goto out_free_sysfs_attr;
8913  }
8914  /* SLI-3 HBA setup */
8915  if (lpfc_sli_hba_setup(phba)) {
8916  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8917  "1477 Failed to set up hba\n");
8918  error = -ENODEV;
8919  goto out_remove_device;
8920  }
8921 
8922  /* Wait 50ms for the interrupts of previous mailbox commands */
8923  msleep(50);
8924  /* Check active interrupts on message signaled interrupts */
8925  if (intr_mode == 0 ||
8926  phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8927  /* Log the current active interrupt mode */
8928  phba->intr_mode = intr_mode;
8929  lpfc_log_intr_mode(phba, intr_mode);
8930  break;
8931  } else {
8932  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8933  "0447 Configure interrupt mode (%d) "
8934  "failed active interrupt test.\n",
8935  intr_mode);
8936  /* Disable the current interrupt mode */
8937  lpfc_sli_disable_intr(phba);
8938  /* Try next level of interrupt mode */
8939  cfg_mode = --intr_mode;
8940  }
8941  }
8942 
8943  /* Perform post initialization setup */
8944  lpfc_post_init_setup(phba);
8945 
8946  /* Check if there are static vports to be created. */
8947  lpfc_create_static_vport(phba);
8948 
8949  return 0;
8950 
8951 out_remove_device:
8952  lpfc_unset_hba(phba);
8953 out_free_sysfs_attr:
8954  lpfc_free_sysfs_attr(vport);
8955 out_destroy_shost:
8956  lpfc_destroy_shost(phba);
8957 out_unset_driver_resource:
8958  lpfc_unset_driver_resource_phase2(phba);
8959 out_free_iocb_list:
8960  lpfc_free_iocb_list(phba);
8961 out_unset_driver_resource_s3:
8962  lpfc_sli_driver_resource_unset(phba);
8963 out_unset_pci_mem_s3:
8964  lpfc_sli_pci_mem_unset(phba);
8965 out_disable_pci_dev:
8966  lpfc_disable_pci_dev(phba);
8967  if (shost)
8968  scsi_host_put(shost);
8969 out_free_phba:
8970  lpfc_hba_free(phba);
8971  return error;
8972 }
8973 
8983 static void __devexit
8984 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8985 {
8986  struct Scsi_Host *shost = pci_get_drvdata(pdev);
8987  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8988  struct lpfc_vport **vports;
8989  struct lpfc_hba *phba = vport->phba;
8990  int i;
8991  int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8992 
8993  spin_lock_irq(&phba->hbalock);
8994  vport->load_flag |= FC_UNLOADING;
8995  spin_unlock_irq(&phba->hbalock);
8996 
8997  lpfc_free_sysfs_attr(vport);
8998 
8999  /* Release all the vports against this physical port */
9000  vports = lpfc_create_vport_work_array(phba);
9001  if (vports != NULL)
9002  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9003  if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9004  continue;
9005  fc_vport_terminate(vports[i]->fc_vport);
9006  }
9007  lpfc_destroy_vport_work_array(phba, vports);
9008 
9009  /* Remove FC host and then SCSI host with the physical port */
9010  fc_remove_host(shost);
9011  scsi_remove_host(shost);
9012  lpfc_cleanup(vport);
9013 
9014  /*
9015  * Bring down the SLI Layer. This step disables all interrupts,
9016  * clears the rings, discards all mailbox commands, and resets
9017  * the HBA.
9018  */
9019 
9020  /* HBA interrupt will be disabled after this call */
9021  lpfc_sli_hba_down(phba);
9022  /* Stop kthread signal shall trigger work_done one more time */
9023  kthread_stop(phba->worker_thread);
9024  /* Final cleanup of txcmplq and reset the HBA */
9025  lpfc_sli_brdrestart(phba);
9026 
9027  kfree(phba->vpi_bmask);
9028  kfree(phba->vpi_ids);
9029 
9030  lpfc_stop_hba_timers(phba);
9031  spin_lock_irq(&phba->hbalock);
9032  list_del_init(&vport->listentry);
9033  spin_unlock_irq(&phba->hbalock);
9034 
9035  lpfc_debugfs_terminate(vport);
9036 
9037  /* Disable SR-IOV if enabled */
9038  if (phba->cfg_sriov_nr_virtfn)
9039  pci_disable_sriov(pdev);
9040 
9041  /* Disable interrupt */
9042  lpfc_sli_disable_intr(phba);
9043 
9044  pci_set_drvdata(pdev, NULL);
9045  scsi_host_put(shost);
9046 
9047  /*
9048  * Call scsi_free before mem_free since scsi bufs are released to their
9049  * corresponding pools here.
9050  */
9051  lpfc_scsi_free(phba);
9052  lpfc_mem_free_all(phba);
9053 
9054  dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9055  phba->hbqslimp.virt, phba->hbqslimp.phys);
9056 
9057  /* Free resources associated with SLI2 interface */
9058  dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9059  phba->slim2p.virt, phba->slim2p.phys);
9060 
9061  /* unmap adapter SLIM and Control Registers */
9062  iounmap(phba->ctrl_regs_memmap_p);
9063  iounmap(phba->slim_memmap_p);
9064 
9065  lpfc_hba_free(phba);
9066 
9067  pci_release_selected_regions(pdev, bars);
9068  pci_disable_device(pdev);
9069 }
9070 
9092 static int
9093 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9094 {
9095  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9096  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9097 
9098  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9099  "0473 PCI device Power Management suspend.\n");
9100 
9101  /* Bring down the device */
9102  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9103  lpfc_offline(phba);
9104  kthread_stop(phba->worker_thread);
9105 
9106  /* Disable interrupt from device */
9107  lpfc_sli_disable_intr(phba);
9108 
9109  /* Save device state to PCI config space */
9110  pci_save_state(pdev);
9111  pci_set_power_state(pdev, PCI_D3hot);
9112 
9113  return 0;
9114 }
9115 
9135 static int
9136 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9137 {
9138  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9139  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9140  uint32_t intr_mode;
9141  int error;
9142 
9143  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9144  "0452 PCI device Power Management resume.\n");
9145 
9146  /* Restore device state from PCI config space */
9147  pci_set_power_state(pdev, PCI_D0);
9148  pci_restore_state(pdev);
9149 
9150  /*
9151  * As the new kernel behavior of pci_restore_state() API call clears
9152  * device saved_state flag, need to save the restored state again.
9153  */
9154  pci_save_state(pdev);
9155 
9156  if (pdev->is_busmaster)
9157  pci_set_master(pdev);
9158 
9159  /* Startup the kernel thread for this host adapter. */
9160  phba->worker_thread = kthread_run(lpfc_do_work, phba,
9161  "lpfc_worker_%d", phba->brd_no);
9162  if (IS_ERR(phba->worker_thread)) {
9163  error = PTR_ERR(phba->worker_thread);
9164  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9165  "0434 PM resume failed to start worker "
9166  "thread: error=x%x.\n", error);
9167  return error;
9168  }
9169 
9170  /* Configure and enable interrupt */
9171  intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9172  if (intr_mode == LPFC_INTR_ERROR) {
9173  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9174  "0430 PM resume Failed to enable interrupt\n");
9175  return -EIO;
9176  } else
9177  phba->intr_mode = intr_mode;
9178 
9179  /* Restart HBA and bring it online */
9180  lpfc_sli_brdrestart(phba);
9181  lpfc_online(phba);
9182 
9183  /* Log the current active interrupt mode */
9184  lpfc_log_intr_mode(phba, phba->intr_mode);
9185 
9186  return 0;
9187 }
9188 
9196 static void
9197 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9198 {
9199  struct lpfc_sli *psli = &phba->sli;
9200  struct lpfc_sli_ring *pring;
9201 
9202  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9203  "2723 PCI channel I/O abort preparing for recovery\n");
9204 
9205  /*
9206  * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9207  * and let the SCSI mid-layer retry them to recover.
9208  */
9209  pring = &psli->ring[psli->fcp_ring];
9210  lpfc_sli_abort_iocb_ring(phba, pring);
9211 }
9212 
9221 static void
9222 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9223 {
9224  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9225  "2710 PCI channel disable preparing for reset\n");
9226 
9227  /* Block any management I/Os to the device */
9228  lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9229 
9230  /* Block all SCSI devices' I/Os on the host */
9231  lpfc_scsi_dev_block(phba);
9232 
9233  /* stop all timers */
9234  lpfc_stop_hba_timers(phba);
9235 
9236  /* Disable interrupt and pci device */
9237  lpfc_sli_disable_intr(phba);
9238  pci_disable_device(phba->pcidev);
9239 
9240  /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9241  lpfc_sli_flush_fcp_rings(phba);
9242 }
9243 
9252 static void
9253 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9254 {
9256  "2711 PCI channel permanent disable for failure\n");
9257  /* Block all SCSI devices' I/Os on the host */
9258  lpfc_scsi_dev_block(phba);
9259 
9260  /* stop all timers */
9261  lpfc_stop_hba_timers(phba);
9262 
9263  /* Clean up all driver's outstanding SCSI I/Os */
9264  lpfc_sli_flush_fcp_rings(phba);
9265 }
9266 
9285 static pci_ers_result_t
9286 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
9287 {
9288  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9289  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9290 
9291  switch (state) {
9292  case pci_channel_io_normal:
9293  /* Non-fatal error, prepare for recovery */
9294  lpfc_sli_prep_dev_for_recover(phba);
9295  return PCI_ERS_RESULT_CAN_RECOVER;
9296  case pci_channel_io_frozen:
9297  /* Fatal error, prepare for slot reset */
9298  lpfc_sli_prep_dev_for_reset(phba);
9299  return PCI_ERS_RESULT_NEED_RESET;
9300  case pci_channel_io_perm_failure:
9301  /* Permanent failure, prepare for device down */
9302  lpfc_sli_prep_dev_for_perm_failure(phba);
9303  return PCI_ERS_RESULT_DISCONNECT;
9304  default:
9305  /* Unknown state, prepare and request slot reset */
9306  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9307  "0472 Unknown PCI error state: x%x\n", state);
9308  lpfc_sli_prep_dev_for_reset(phba);
9309  return PCI_ERS_RESULT_NEED_RESET;
9310  }
9311 }
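/* Note: the mapping above follows the PCI error-recovery contract:
 * io_normal -> CAN_RECOVER (keep running, abort outstanding I/O),
 * io_frozen -> NEED_RESET (MMIO blocked, request a slot reset),
 * io_perm_failure -> DISCONNECT (device is gone); an unrecognized state
 * is treated conservatively as needing a slot reset.
 */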
9312 
9331 static pci_ers_result_t
9332 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9333 {
9334  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9335  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9336  struct lpfc_sli *psli = &phba->sli;
9337  uint32_t intr_mode;
9338 
9339  dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9340  if (pci_enable_device_mem(pdev)) {
9341  printk(KERN_ERR "lpfc: Cannot re-enable "
9342  "PCI device after reset.\n");
9343  return PCI_ERS_RESULT_DISCONNECT;
9344  }
9345 
9346  pci_restore_state(pdev);
9347 
9348  /*
9349  * As the new kernel behavior of pci_restore_state() API call clears
9350  * device saved_state flag, need to save the restored state again.
9351  */
9352  pci_save_state(pdev);
9353 
9354  if (pdev->is_busmaster)
9355  pci_set_master(pdev);
9356 
9357  spin_lock_irq(&phba->hbalock);
9358  psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9359  spin_unlock_irq(&phba->hbalock);
9360 
9361  /* Configure and enable interrupt */
9362  intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9363  if (intr_mode == LPFC_INTR_ERROR) {
9364  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9365  "0427 Cannot re-enable interrupt after "
9366  "slot reset.\n");
9367  return PCI_ERS_RESULT_DISCONNECT;
9368  } else
9369  phba->intr_mode = intr_mode;
9370 
9371  /* Take device offline, it will perform cleanup */
9372  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9373  lpfc_offline(phba);
9374  lpfc_sli_brdrestart(phba);
9375 
9376  /* Log the current active interrupt mode */
9377  lpfc_log_intr_mode(phba, phba->intr_mode);
9378 
9379  return PCI_ERS_RESULT_RECOVERED;
9380 }
9381 
9392 static void
9393 lpfc_io_resume_s3(struct pci_dev *pdev)
9394 {
9395  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9396  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9397 
9398  /* Bring device online, it will be no-op for non-fatal error resume */
9399  lpfc_online(phba);
9400 
9401  /* Clean up Advanced Error Reporting (AER) if needed */
9402  if (phba->hba_flag & HBA_AER_ENABLED)
9403  pci_cleanup_aer_uncorrect_error_status(pdev);
9404 }
9405 
9412 int
9413 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9414 {
9415  int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9416 
9417  if (phba->sli_rev == LPFC_SLI_REV4) {
9418  if (max_xri <= 100)
9419  return 10;
9420  else if (max_xri <= 256)
9421  return 25;
9422  else if (max_xri <= 512)
9423  return 50;
9424  else if (max_xri <= 1024)
9425  return 100;
9426  else if (max_xri <= 1536)
9427  return 150;
9428  else if (max_xri <= 2048)
9429  return 200;
9430  else
9431  return 250;
9432  } else
9433  return 0;
9434 }
9435 
9442 static void
9443 lpfc_write_firmware(const struct firmware *fw, void *context)
9444 {
9445  struct lpfc_hba *phba = (struct lpfc_hba *)context;
9446  char fwrev[FW_REV_STR_SIZE];
9447  struct lpfc_grp_hdr *image;
9448  struct list_head dma_buffer_list;
9449  int i, rc = 0;
9450  struct lpfc_dmabuf *dmabuf, *next;
9451  uint32_t offset = 0, temp_offset = 0;
9452 
9453  /* It can be null, sanity check */
9454  if (!fw) {
9455  rc = -ENXIO;
9456  goto out;
9457  }
9458  image = (struct lpfc_grp_hdr *)fw->data;
9459 
9460  INIT_LIST_HEAD(&dma_buffer_list);
9461  if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9462  (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
9463  LPFC_FILE_TYPE_GROUP) ||
9464  (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
9465  (be32_to_cpu(image->size) != fw->size)) {
9466  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9467  "3022 Invalid FW image found. "
9468  "Magic:%x Type:%x ID:%x\n",
9469  be32_to_cpu(image->magic_number),
9470  bf_get_be32(lpfc_grp_hdr_file_type, image),
9471  bf_get_be32(lpfc_grp_hdr_id, image));
9472  rc = -EINVAL;
9473  goto release_out;
9474  }
9475  lpfc_decode_firmware_rev(phba, fwrev, 1);
9476  if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9477  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9478  "3023 Updating Firmware, Current Version:%s "
9479  "New Version:%s\n",
9480  fwrev, image->revision);
9481  for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
9482  dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
9483  GFP_KERNEL);
9484  if (!dmabuf) {
9485  rc = -ENOMEM;
9486  goto release_out;
9487  }
9488  dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9489  SLI4_PAGE_SIZE,
9490  &dmabuf->phys,
9491  GFP_KERNEL);
9492  if (!dmabuf->virt) {
9493  kfree(dmabuf);
9494  rc = -ENOMEM;
9495  goto release_out;
9496  }
9497  list_add_tail(&dmabuf->list, &dma_buffer_list);
9498  }
9499  while (offset < fw->size) {
9500  temp_offset = offset;
9501  list_for_each_entry(dmabuf, &dma_buffer_list, list) {
9502  if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
9503  memcpy(dmabuf->virt,
9504  fw->data + temp_offset,
9505  fw->size - temp_offset);
9506  temp_offset = fw->size;
9507  break;
9508  }
9509  memcpy(dmabuf->virt, fw->data + temp_offset,
9510  SLI4_PAGE_SIZE);
9511  temp_offset += SLI4_PAGE_SIZE;
9512  }
9513  rc = lpfc_wr_object(phba, &dma_buffer_list,
9514  (fw->size - offset), &offset);
9515  if (rc)
9516  goto release_out;
9517  }
9518  rc = offset;
9519  }
9520 
9521 release_out:
9522  list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9523  list_del(&dmabuf->list);
9524  dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9525  dmabuf->virt, dmabuf->phys);
9526  kfree(dmabuf);
9527  }
9528  release_firmware(fw);
9529 out:
9530  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9531  "3024 Firmware update done: %d.", rc);
9532  return;
9533 }
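/*
 * Flow of the routine above: validate the image header (magic, type,
 * ID, and size), compare the image revision with the running firmware
 * revision, and, when they differ, stream the image to the port in
 * SLI4_PAGE_SIZE chunks through up to LPFC_MBX_WR_CONFIG_MAX_BDE DMA
 * buffers per lpfc_wr_object() call until fw->size bytes are written.
 */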
9534 
9553 static int __devinit
9554 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9555 {
9556  struct lpfc_hba *phba;
9557  struct lpfc_vport *vport = NULL;
9558  struct Scsi_Host *shost = NULL;
9559  int error, ret;
9560  uint32_t cfg_mode, intr_mode;
9561  int mcnt;
9562  int adjusted_fcp_io_channel;
9563  uint8_t file_name[ELX_MODEL_NAME_SIZE];
9564 
9565  /* Allocate memory for HBA structure */
9566  phba = lpfc_hba_alloc(pdev);
9567  if (!phba)
9568  return -ENOMEM;
9569 
9570  /* Perform generic PCI device enabling operation */
9571  error = lpfc_enable_pci_dev(phba);
9572  if (error)
9573  goto out_free_phba;
9574 
9575  /* Set up SLI API function jump table for PCI-device group-1 HBAs */
9576  error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9577  if (error)
9578  goto out_disable_pci_dev;
9579 
9580  /* Set up SLI-4 specific device PCI memory space */
9581  error = lpfc_sli4_pci_mem_setup(phba);
9582  if (error) {
9583  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9584  "1410 Failed to set up pci memory space.\n");
9585  goto out_disable_pci_dev;
9586  }
9587 
9588  /* Set up phase-1 common device driver resources */
9589  error = lpfc_setup_driver_resource_phase1(phba);
9590  if (error) {
9591  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9592  "1411 Failed to set up driver resource.\n");
9593  goto out_unset_pci_mem_s4;
9594  }
9595 
9596  /* Set up SLI-4 Specific device driver resources */
9597  error = lpfc_sli4_driver_resource_setup(phba);
9598  if (error) {
9599  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9600  "1412 Failed to set up driver resource.\n");
9601  goto out_unset_pci_mem_s4;
9602  }
9603 
9604  /* Initialize and populate the iocb list per host */
9605 
9606  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9607  "2821 initialize iocb list %d.\n",
9608  phba->cfg_iocb_cnt*1024);
9609  error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9610 
9611  if (error) {
9612  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9613  "1413 Failed to initialize iocb list.\n");
9614  goto out_unset_driver_resource_s4;
9615  }
9616 
9617  INIT_LIST_HEAD(&phba->active_rrq_list);
9618  INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9619 
9620  /* Set up common device driver resources */
9621  error = lpfc_setup_driver_resource_phase2(phba);
9622  if (error) {
9623  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9624  "1414 Failed to set up driver resource.\n");
9625  goto out_free_iocb_list;
9626  }
9627 
9628  /* Get the default values for Model Name and Description */
9629  lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9630 
9631  /* Create SCSI host to the physical port */
9632  error = lpfc_create_shost(phba);
9633  if (error) {
9634  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9635  "1415 Failed to create scsi host.\n");
9636  goto out_unset_driver_resource;
9637  }
9638 
9639  /* Configure sysfs attributes */
9640  vport = phba->pport;
9641  error = lpfc_alloc_sysfs_attr(vport);
9642  if (error) {
9643  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9644  "1416 Failed to allocate sysfs attr\n");
9645  goto out_destroy_shost;
9646  }
9647 
9648  shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9649  /* Now, trying to enable interrupt and bring up the device */
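 /*
  * The loop below walks down the interrupt-mode ladder: cfg_use_msi
  * selects the starting mode (2 = MSI-X, 1 = MSI, 0 = INTx), and
  * "cfg_mode = --intr_mode" falls back to the next weaker mode each
  * time the active-interrupt test fails.
  */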
9650  cfg_mode = phba->cfg_use_msi;
9651  while (true) {
9652  /* Put device to a known state before enabling interrupt */
9653  lpfc_stop_port(phba);
9654  /* Configure and enable interrupt */
9655  intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9656  if (intr_mode == LPFC_INTR_ERROR) {
9657  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9658  "0426 Failed to enable interrupt.\n");
9659  error = -ENODEV;
9660  goto out_free_sysfs_attr;
9661  }
9662  /* Default to single EQ for non-MSI-X */
9663  if (phba->intr_type != MSIX)
9664  adjusted_fcp_io_channel = 1;
9665  else
9666  adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9667  phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9668  /* Set up SLI-4 HBA */
9669  if (lpfc_sli4_hba_setup(phba)) {
9670  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9671  "1421 Failed to set up hba\n");
9672  error = -ENODEV;
9673  goto out_disable_intr;
9674  }
9675 
9676  /* Send NOP mbx cmds for non-INTx mode active interrupt test */
9677  if (intr_mode != 0)
9678  mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9679  LPFC_ACT_INTR_CNT);
9680 
9681  /* Check active interrupts received only for MSI/MSI-X */
9682  if (intr_mode == 0 ||
9683  phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9684  /* Log the current active interrupt mode */
9685  phba->intr_mode = intr_mode;
9686  lpfc_log_intr_mode(phba, intr_mode);
9687  break;
9688  }
9689  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9690  "0451 Configure interrupt mode (%d) "
9691  "failed active interrupt test.\n",
9692  intr_mode);
9693  /* Unset the previous SLI-4 HBA setup. */
9694  /*
9695  * TODO: Is this operation compatible with IF TYPE 2
9696  * devices? All port state is deleted and cleared.
9697  */
9698  lpfc_sli4_unset_hba(phba);
9699  /* Try next level of interrupt mode */
9700  cfg_mode = --intr_mode;
9701  }
9702 
9703  /* Perform post initialization setup */
9704  lpfc_post_init_setup(phba);
9705 
9706  /* check for firmware upgrade or downgrade (if_type 2 only) */
9707  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9708  LPFC_SLI_INTF_IF_TYPE_2) {
9709  snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
9710  phba->ModelName);
9711  ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
9712  file_name, &phba->pcidev->dev,
9713  GFP_KERNEL, (void *)phba,
9714  lpfc_write_firmware);
9715  }
9716 
9717  /* Check if there are static vports to be created. */
9718  lpfc_create_static_vport(phba);
9719  return 0;
9720 
9721 out_disable_intr:
9722  lpfc_sli4_disable_intr(phba);
9723 out_free_sysfs_attr:
9724  lpfc_free_sysfs_attr(vport);
9725 out_destroy_shost:
9726  lpfc_destroy_shost(phba);
9727 out_unset_driver_resource:
9728  lpfc_unset_driver_resource_phase2(phba);
9729 out_free_iocb_list:
9730  lpfc_free_iocb_list(phba);
9731 out_unset_driver_resource_s4:
9732  lpfc_sli4_driver_resource_unset(phba);
9733 out_unset_pci_mem_s4:
9734  lpfc_sli4_pci_mem_unset(phba);
9735 out_disable_pci_dev:
9736  lpfc_disable_pci_dev(phba);
9737  if (shost)
9738  scsi_host_put(shost);
9739 out_free_phba:
9740  lpfc_hba_free(phba);
9741  return error;
9742 }
9743 
9753 static void __devexit
9754 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9755 {
9756  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9757  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9758  struct lpfc_vport **vports;
9759  struct lpfc_hba *phba = vport->phba;
9760  int i;
9761 
9762  /* Mark the device unloading flag */
9763  spin_lock_irq(&phba->hbalock);
9764  vport->load_flag |= FC_UNLOADING;
9765  spin_unlock_irq(&phba->hbalock);
9766 
9767  /* Free the HBA sysfs attributes */
9768  lpfc_free_sysfs_attr(vport);
9769 
9770  /* Release all the vports against this physical port */
9771  vports = lpfc_create_vport_work_array(phba);
9772  if (vports != NULL)
9773  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9774  if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9775  continue;
9776  fc_vport_terminate(vports[i]->fc_vport);
9777  }
9778  lpfc_destroy_vport_work_array(phba, vports);
9779 
9780  /* Remove FC host and then SCSI host with the physical port */
9781  fc_remove_host(shost);
9782  scsi_remove_host(shost);
9783 
9784  /* Perform cleanup on the physical port */
9785  lpfc_cleanup(vport);
9786 
9787  /*
9788  * Bring down the SLI Layer. This step disables all interrupts,
9789  * clears the rings, discards all mailbox commands, and resets
9790  * the HBA FCoE function.
9791  */
9792  lpfc_debugfs_terminate(vport);
9793  lpfc_sli4_hba_unset(phba);
9794 
9795  spin_lock_irq(&phba->hbalock);
9796  list_del_init(&vport->listentry);
9797  spin_unlock_irq(&phba->hbalock);
9798 
9799  /* Perform scsi free before driver resource_unset since scsi
9800  * buffers are released to their corresponding pools here.
9801  */
9802  lpfc_scsi_free(phba);
9803 
9804  lpfc_sli4_driver_resource_unset(phba);
9805 
9806  /* Unmap adapter Control and Doorbell registers */
9807  lpfc_sli4_pci_mem_unset(phba);
9808 
9809  /* Release PCI resources and disable device's PCI function */
9810  scsi_host_put(shost);
9811  lpfc_disable_pci_dev(phba);
9812 
9813  /* Finally, free the driver's device data structure */
9814  lpfc_hba_free(phba);
9815 
9816  return;
9817 }
9818 
9840 static int
9841 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9842 {
9843  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9844  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9845 
9846  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9847  "2843 PCI device Power Management suspend.\n");
9848 
9849  /* Bring down the device */
9850  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9851  lpfc_offline(phba);
9852  kthread_stop(phba->worker_thread);
9853 
9854  /* Disable interrupt from device */
9855  lpfc_sli4_disable_intr(phba);
9856  lpfc_sli4_queue_destroy(phba);
9857 
9858  /* Save device state to PCI config space */
9859  pci_save_state(pdev);
9860  pci_set_power_state(pdev, PCI_D3hot);
9861 
9862  return 0;
9863 }
9864 
9884 static int
9885 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9886 {
9887  struct Scsi_Host *shost = pci_get_drvdata(pdev);
9888  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9889  uint32_t intr_mode;
9890  int error;
9891 
9892  lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9893  "0292 PCI device Power Management resume.\n");
9894 
9895  /* Restore device state from PCI config space */
9896  pci_set_power_state(pdev, PCI_D0);
9897  pci_restore_state(pdev);
9898 
9899  /*
9900  * As pci_restore_state() now clears the device's saved_state flag,
9901  * we need to save the restored state again.
9902  */
9903  pci_save_state(pdev);
9904 
9905  if (pdev->is_busmaster)
9906  pci_set_master(pdev);
9907 
9908  /* Startup the kernel thread for this host adapter. */
9909  phba->worker_thread = kthread_run(lpfc_do_work, phba,
9910  "lpfc_worker_%d", phba->brd_no);
9911  if (IS_ERR(phba->worker_thread)) {
9912  error = PTR_ERR(phba->worker_thread);
9913  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9914  "0293 PM resume failed to start worker "
9915  "thread: error=x%x.\n", error);
9916  return error;
9917  }
9918 
9919  /* Configure and enable interrupt */
9920  intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9921  if (intr_mode == LPFC_INTR_ERROR) {
9922  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9923  "0294 PM resume Failed to enable interrupt\n");
9924  return -EIO;
9925  } else
9926  phba->intr_mode = intr_mode;
9927 
9928  /* Restart HBA and bring it online */
9929  lpfc_sli_brdrestart(phba);
9930  lpfc_online(phba);
9931 
9932  /* Log the current active interrupt mode */
9933  lpfc_log_intr_mode(phba, phba->intr_mode);
9934 
9935  return 0;
9936 }
9937 
9945 static void
9946 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9947 {
9948  struct lpfc_sli *psli = &phba->sli;
9949  struct lpfc_sli_ring *pring;
9950 
9951  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9952  "2828 PCI channel I/O abort preparing for recovery\n");
9953  /*
9954  * There may be errored I/Os through the HBA, abort all I/Os on the
9955  * txcmplq and let the SCSI mid-layer retry them to recover.
9956  */
9957  pring = &psli->ring[psli->fcp_ring];
9958  lpfc_sli_abort_iocb_ring(phba, pring);
9959 }
9960 
9969 static void
9970 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9971 {
9972  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9973  "2826 PCI channel disable preparing for reset\n");
9974 
9975  /* Block any management I/Os to the device */
9976  lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
9977 
9978  /* Block all SCSI devices' I/Os on the host */
9979  lpfc_scsi_dev_block(phba);
9980 
9981  /* stop all timers */
9982  lpfc_stop_hba_timers(phba);
9983 
9984  /* Disable interrupt and pci device */
9985  lpfc_sli4_disable_intr(phba);
9986  lpfc_sli4_queue_destroy(phba);
9987  pci_disable_device(phba->pcidev);
9988 
9989  /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9990  lpfc_sli_flush_fcp_rings(phba);
9991 }
9992 
10001 static void
10002 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10003 {
10004  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10005  "2827 PCI channel permanent disable for failure\n");
10006 
10007  /* Block all SCSI devices' I/Os on the host */
10008  lpfc_scsi_dev_block(phba);
10009 
10010  /* stop all timers */
10011  lpfc_stop_hba_timers(phba);
10012 
10013  /* Clean up all driver's outstanding SCSI I/Os */
10014  lpfc_sli_flush_fcp_rings(phba);
10015 }
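/*
 * The three prep routines above escalate: _recover only aborts the
 * outstanding FCP I/Os so the mid-layer can retry them; _reset also
 * blocks management and SCSI I/O, stops the HBA timers, and disables
 * the interrupt and the PCI device before flushing the FCP rings;
 * _perm_failure blocks I/O, stops the timers, and flushes the rings,
 * leaving the device down.
 */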
10016 
10033 static pci_ers_result_t
10034 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
10035 {
10036  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10037  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10038 
10039  switch (state) {
10040  case pci_channel_io_normal:
10041  /* Non-fatal error, prepare for recovery */
10042  lpfc_sli4_prep_dev_for_recover(phba);
10043  return PCI_ERS_RESULT_CAN_RECOVER;
10044  case pci_channel_io_frozen:
10045  /* Fatal error, prepare for slot reset */
10046  lpfc_sli4_prep_dev_for_reset(phba);
10047  return PCI_ERS_RESULT_NEED_RESET;
10048  case pci_channel_io_perm_failure:
10049  /* Permanent failure, prepare for device down */
10050  lpfc_sli4_prep_dev_for_perm_failure(phba);
10051  return PCI_ERS_RESULT_DISCONNECT;
10052  default:
10053  /* Unknown state, prepare and request slot reset */
10054  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10055  "2825 Unknown PCI error state: x%x\n", state);
10056  lpfc_sli4_prep_dev_for_reset(phba);
10057  return PCI_ERS_RESULT_NEED_RESET;
10058  }
10059 }
10060 
10079 static pci_ers_result_t
10080 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
10081 {
10082  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10083  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10084  struct lpfc_sli *psli = &phba->sli;
10085  uint32_t intr_mode;
10086 
10087  dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10088  if (pci_enable_device_mem(pdev)) {
10089  printk(KERN_ERR "lpfc: Cannot re-enable "
10090  "PCI device after reset.\n");
10091  return PCI_ERS_RESULT_DISCONNECT;
10092  }
10093 
10094  pci_restore_state(pdev);
10095 
10096  /*
10097  * As pci_restore_state() now clears the device's saved_state flag,
10098  * we need to save the restored state again.
10099  */
10100  pci_save_state(pdev);
10101 
10102  if (pdev->is_busmaster)
10103  pci_set_master(pdev);
10104 
10105  spin_lock_irq(&phba->hbalock);
10106  psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10107  spin_unlock_irq(&phba->hbalock);
10108 
10109  /* Configure and enable interrupt */
10110  intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10111  if (intr_mode == LPFC_INTR_ERROR) {
10112  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10113  "2824 Cannot re-enable interrupt after "
10114  "slot reset.\n");
10115  return PCI_ERS_RESULT_DISCONNECT;
10116  } else
10117  phba->intr_mode = intr_mode;
10118 
10119  /* Log the current active interrupt mode */
10120  lpfc_log_intr_mode(phba, phba->intr_mode);
10121 
10122  return PCI_ERS_RESULT_RECOVERED;
10123 }
10124 
10135 static void
10136 lpfc_io_resume_s4(struct pci_dev *pdev)
10137 {
10138  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10139  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10140 
10141  /*
10142  * In case of slot reset, the function reset is performed through a
10143  * mailbox command, which requires DMA to be enabled, so this step has
10144  * to be deferred to the io resume phase. Taking the device offline
10145  * will perform the necessary cleanup.
10146  */
10147  if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
10148  /* Perform device reset */
10149  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10150  lpfc_offline(phba);
10151  lpfc_sli_brdrestart(phba);
10152  /* Bring the device back online */
10153  lpfc_online(phba);
10154  }
10155 
10156  /* Clean up Advanced Error Reporting (AER) if needed */
10157  if (phba->hba_flag & HBA_AER_ENABLED)
10158  pci_cleanup_aer_uncorrect_error_status(pdev);
10159 }
10160 
10179 static int __devinit
10180 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
10181 {
10182  int rc;
10183  struct lpfc_sli_intf intf;
10184 
10185  if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
10186  return -ENODEV;
10187 
10188  if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
10189  (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
10190  rc = lpfc_pci_probe_one_s4(pdev, pid);
10191  else
10192  rc = lpfc_pci_probe_one_s3(pdev, pid);
10193 
10194  return rc;
10195 }
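/*
 * Device-group routing: the SLI_INTF register is read directly from
 * PCI config space, before any BARs are mapped, so this single probe
 * entry can dispatch SLI-4 parts (valid and rev bits set) to the s4
 * path and all older SLI-3 parts to the s3 path.
 */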
10196 
10207 static void __devexit
10208 lpfc_pci_remove_one(struct pci_dev *pdev)
10209 {
10210  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10211  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10212 
10213  switch (phba->pci_dev_grp) {
10214  case LPFC_PCI_DEV_LP:
10215  lpfc_pci_remove_one_s3(pdev);
10216  break;
10217  case LPFC_PCI_DEV_OC:
10218  lpfc_pci_remove_one_s4(pdev);
10219  break;
10220  default:
10221  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10222  "1424 Invalid PCI device group: 0x%x\n",
10223  phba->pci_dev_grp);
10224  break;
10225  }
10226  return;
10227 }
10228 
10243 static int
10244 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
10245 {
10246  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10247  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10248  int rc = -ENODEV;
10249 
10250  switch (phba->pci_dev_grp) {
10251  case LPFC_PCI_DEV_LP:
10252  rc = lpfc_pci_suspend_one_s3(pdev, msg);
10253  break;
10254  case LPFC_PCI_DEV_OC:
10255  rc = lpfc_pci_suspend_one_s4(pdev, msg);
10256  break;
10257  default:
10258  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10259  "1425 Invalid PCI device group: 0x%x\n",
10260  phba->pci_dev_grp);
10261  break;
10262  }
10263  return rc;
10264 }
10265 
10279 static int
10280 lpfc_pci_resume_one(struct pci_dev *pdev)
10281 {
10282  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10283  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10284  int rc = -ENODEV;
10285 
10286  switch (phba->pci_dev_grp) {
10287  case LPFC_PCI_DEV_LP:
10288  rc = lpfc_pci_resume_one_s3(pdev);
10289  break;
10290  case LPFC_PCI_DEV_OC:
10291  rc = lpfc_pci_resume_one_s4(pdev);
10292  break;
10293  default:
10294  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10295  "1426 Invalid PCI device group: 0x%x\n",
10296  phba->pci_dev_grp);
10297  break;
10298  }
10299  return rc;
10300 }
10301 
10317 static pci_ers_result_t
10318 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10319 {
10320  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10321  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10322  pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10323 
10324  switch (phba->pci_dev_grp) {
10325  case LPFC_PCI_DEV_LP:
10326  rc = lpfc_io_error_detected_s3(pdev, state);
10327  break;
10328  case LPFC_PCI_DEV_OC:
10329  rc = lpfc_io_error_detected_s4(pdev, state);
10330  break;
10331  default:
10332  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10333  "1427 Invalid PCI device group: 0x%x\n",
10334  phba->pci_dev_grp);
10335  break;
10336  }
10337  return rc;
10338 }
10339 
10354 static pci_ers_result_t
10355 lpfc_io_slot_reset(struct pci_dev *pdev)
10356 {
10357  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10358  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10359  pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
10360 
10361  switch (phba->pci_dev_grp) {
10362  case LPFC_PCI_DEV_LP:
10363  rc = lpfc_io_slot_reset_s3(pdev);
10364  break;
10365  case LPFC_PCI_DEV_OC:
10366  rc = lpfc_io_slot_reset_s4(pdev);
10367  break;
10368  default:
10369  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10370  "1428 Invalid PCI device group: 0x%x\n",
10371  phba->pci_dev_grp);
10372  break;
10373  }
10374  return rc;
10375 }
10376 
10387 static void
10388 lpfc_io_resume(struct pci_dev *pdev)
10389 {
10390  struct Scsi_Host *shost = pci_get_drvdata(pdev);
10391  struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10392 
10393  switch (phba->pci_dev_grp) {
10394  case LPFC_PCI_DEV_LP:
10395  lpfc_io_resume_s3(pdev);
10396  break;
10397  case LPFC_PCI_DEV_OC:
10398  lpfc_io_resume_s4(pdev);
10399  break;
10400  default:
10401  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10402  "1429 Invalid PCI device group: 0x%x\n",
10403  phba->pci_dev_grp);
10404  break;
10405  }
10406  return;
10407 }
10408 
10417 static int
10418 lpfc_mgmt_open(struct inode *inode, struct file *filep)
10419 {
10420  try_module_get(THIS_MODULE);
10421  return 0;
10422 }
10423 
10432 static int
10433 lpfc_mgmt_release(struct inode *inode, struct file *filep)
10434 {
10435  module_put(THIS_MODULE);
10436  return 0;
10437 }
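/*
 * Note: the fops above implement no read/write/ioctl; opening the
 * "lpfcmgmt" node only takes a module reference and releasing it drops
 * that reference, so user-space management tools can pin the driver
 * module while they are active.
 */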
10438 
10439 static struct pci_device_id lpfc_id_table[] = {
10440  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
10441  PCI_ANY_ID, PCI_ANY_ID, },
10442  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
10443  PCI_ANY_ID, PCI_ANY_ID, },
10444  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
10445  PCI_ANY_ID, PCI_ANY_ID, },
10446  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
10447  PCI_ANY_ID, PCI_ANY_ID, },
10448  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
10449  PCI_ANY_ID, PCI_ANY_ID, },
10450  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
10451  PCI_ANY_ID, PCI_ANY_ID, },
10452  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
10453  PCI_ANY_ID, PCI_ANY_ID, },
10454  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
10455  PCI_ANY_ID, PCI_ANY_ID, },
10456  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
10457  PCI_ANY_ID, PCI_ANY_ID, },
10458  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
10459  PCI_ANY_ID, PCI_ANY_ID, },
10460  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
10461  PCI_ANY_ID, PCI_ANY_ID, },
10462  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
10463  PCI_ANY_ID, PCI_ANY_ID, },
10464  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
10465  PCI_ANY_ID, PCI_ANY_ID, },
10466  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
10467  PCI_ANY_ID, PCI_ANY_ID, },
10468  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
10469  PCI_ANY_ID, PCI_ANY_ID, },
10470  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
10471  PCI_ANY_ID, PCI_ANY_ID, },
10472  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
10473  PCI_ANY_ID, PCI_ANY_ID, },
10474  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
10475  PCI_ANY_ID, PCI_ANY_ID, },
10476  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
10477  PCI_ANY_ID, PCI_ANY_ID, },
10478  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
10479  PCI_ANY_ID, PCI_ANY_ID, },
10480  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
10481  PCI_ANY_ID, PCI_ANY_ID, },
10482  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
10483  PCI_ANY_ID, PCI_ANY_ID, },
10484  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
10485  PCI_ANY_ID, PCI_ANY_ID, },
10486  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
10487  PCI_ANY_ID, PCI_ANY_ID, },
10488  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
10489  PCI_ANY_ID, PCI_ANY_ID, },
10490  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
10491  PCI_ANY_ID, PCI_ANY_ID, },
10492  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
10493  PCI_ANY_ID, PCI_ANY_ID, },
10494  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
10495  PCI_ANY_ID, PCI_ANY_ID, },
10496  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
10497  PCI_ANY_ID, PCI_ANY_ID, },
10498  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
10499  PCI_ANY_ID, PCI_ANY_ID, },
10500  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
10501  PCI_ANY_ID, PCI_ANY_ID, },
10502  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
10503  PCI_ANY_ID, PCI_ANY_ID, },
10504  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
10505  PCI_ANY_ID, PCI_ANY_ID, },
10506  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
10507  PCI_ANY_ID, PCI_ANY_ID, },
10508  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
10509  PCI_ANY_ID, PCI_ANY_ID, },
10510  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
10511  PCI_ANY_ID, PCI_ANY_ID, },
10512  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
10513  PCI_ANY_ID, PCI_ANY_ID, },
10514  {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
10515  PCI_ANY_ID, PCI_ANY_ID, },
10516  {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
10517  PCI_ANY_ID, PCI_ANY_ID, },
10518  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
10519  PCI_ANY_ID, PCI_ANY_ID, },
10520  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
10521  PCI_ANY_ID, PCI_ANY_ID, },
10522  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
10523  PCI_ANY_ID, PCI_ANY_ID, },
10524  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
10525  PCI_ANY_ID, PCI_ANY_ID, },
10526  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
10527  PCI_ANY_ID, PCI_ANY_ID, },
10528  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
10529  PCI_ANY_ID, PCI_ANY_ID, },
10530  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
10531  PCI_ANY_ID, PCI_ANY_ID, },
10532  {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
10533  PCI_ANY_ID, PCI_ANY_ID, },
10534  { 0 }
10535 };
10536 
10537 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
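/*
 * MODULE_DEVICE_TABLE exports the id table above as module alias
 * information, letting udev/modprobe autoload lpfc when a matching
 * adapter is discovered.
 */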
10538 
10539 static const struct pci_error_handlers lpfc_err_handler = {
10540  .error_detected = lpfc_io_error_detected,
10541  .slot_reset = lpfc_io_slot_reset,
10542  .resume = lpfc_io_resume,
10543 };
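/*
 * AER callback ordering: the PCI core invokes .error_detected first;
 * when that returns PCI_ERS_RESULT_NEED_RESET the core resets the slot
 * and calls .slot_reset, and .resume runs last to restart traffic.
 */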
10544 
10545 static struct pci_driver lpfc_driver = {
10546  .name = LPFC_DRIVER_NAME,
10547  .id_table = lpfc_id_table,
10548  .probe = lpfc_pci_probe_one,
10549  .remove = __devexit_p(lpfc_pci_remove_one),
10550  .suspend = lpfc_pci_suspend_one,
10551  .resume = lpfc_pci_resume_one,
10552  .err_handler = &lpfc_err_handler,
10553 };
10554 
10555 static const struct file_operations lpfc_mgmt_fop = {
10556  .open = lpfc_mgmt_open,
10557  .release = lpfc_mgmt_release,
10558 };
10559 
10560 static struct miscdevice lpfc_mgmt_dev = {
10561  .minor = MISC_DYNAMIC_MINOR,
10562  .name = "lpfcmgmt",
10563  .fops = &lpfc_mgmt_fop,
10564 };
10565 
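/*
 * lpfc_init is the module entry point: it registers the lpfcmgmt misc
 * device, wires the vport create/delete handlers into the transport
 * template when NPIV is enabled, attaches the FC transport template(s),
 * and registers the PCI driver, releasing the templates again on
 * failure.
 */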
10578 static int __init
10579 lpfc_init(void)
10580 {
10581  int error = 0;
10582 
10583  printk(LPFC_MODULE_DESC "\n");
10584  printk(LPFC_COPYRIGHT "\n");
10585 
10586  error = misc_register(&lpfc_mgmt_dev);
10587  if (error)
10588  printk(KERN_ERR "Could not register lpfcmgmt device, "
10589  "misc_register returned with status %d", error);
10590 
10591  if (lpfc_enable_npiv) {
10592  lpfc_transport_functions.vport_create = lpfc_vport_create;
10593  lpfc_transport_functions.vport_delete = lpfc_vport_delete;
10594  }
10595  lpfc_transport_template =
10596  fc_attach_transport(&lpfc_transport_functions);
10597  if (lpfc_transport_template == NULL)
10598  return -ENOMEM;
10599  if (lpfc_enable_npiv) {
10600  lpfc_vport_transport_template =
10601  fc_attach_transport(&lpfc_vport_transport_functions);
10602  if (lpfc_vport_transport_template == NULL) {
10603  fc_release_transport(lpfc_transport_template);
10604  return -ENOMEM;
10605  }
10606  }
10607  error = pci_register_driver(&lpfc_driver);
10608  if (error) {
10609  fc_release_transport(lpfc_transport_template);
10610  if (lpfc_enable_npiv)
10611  fc_release_transport(lpfc_vport_transport_template);
10612  }
10613 
10614  return error;
10615 }
10616 
10624 static void __exit
10625 lpfc_exit(void)
10626 {
10627  misc_deregister(&lpfc_mgmt_dev);
10628  pci_unregister_driver(&lpfc_driver);
10629  fc_release_transport(lpfc_transport_template);
10630  if (lpfc_enable_npiv)
10631  fc_release_transport(lpfc_vport_transport_template);
10632  if (_dump_buf_data) {
10633  printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
10634  "_dump_buf_data at 0x%p\n",
10635  (1L << _dump_buf_data_order), _dump_buf_data);
10636  free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
10637  }
10638 
10639  if (_dump_buf_dif) {
10640  printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
10641  "_dump_buf_dif at 0x%p\n",
10642  (1L << _dump_buf_dif_order), _dump_buf_dif);
10643  free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
10644  }
10645 }
10646 
10647 module_init(lpfc_init);
10648 module_exit(lpfc_exit);
10649 MODULE_LICENSE("GPL");
10650 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
10651 MODULE_AUTHOR("Emulex Corporation - [email protected]");