Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
lpfc_mbox.c
Go to the documentation of this file.
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for *
3  * Fibre Channel Host Bus Adapters. *
4  * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8  * *
9  * This program is free software; you can redistribute it and/or *
10  * modify it under the terms of version 2 of the GNU General *
11  * Public License as published by the Free Software Foundation. *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID. See the GNU General Public License for *
18  * more details, a copy of which can be found in the file COPYING *
19  * included with this package. *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_transport_fc.h>
29 #include <scsi/scsi.h>
30 #include <scsi/fc/fc_fs.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_compat.h"
43 
/*
 * lpfc_dump_static_vport (inferred from log message 2605): set up a DUMP
 * mailbox command to read the static vport info region.  For SLI3 HBAs the
 * response data is embedded in the mailbox; for SLI4 a DMA buffer is
 * allocated and its address written into the mailbox words.  Returns 0 on
 * success, 1 when the SLI4 buffer allocation fails.
 *
 * NOTE(review): this Doxygen-extracted listing dropped several hyperlinked
 * lines (the function signature at original lines 57-58, the command/region
 * setup at lines 67-70, and the lpfc_printf_log() call at line 87); consult
 * the unextracted lpfc_mbox.c before modifying.
 */
56 int
59 {
60  MAILBOX_t *mb;
61  struct lpfc_dmabuf *mp;
62 
63  mb = &pmb->u.mb;
64 
65  /* Setup to dump vport info region */
66  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
71  mb->mbxOwner = OWN_HOST;
72 
73  /* For SLI3 HBAs data is embedded in mailbox */
74  if (phba->sli_rev != LPFC_SLI_REV4) {
75  mb->un.varDmp.cv = 1;
76  mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
77  return 0;
78  }
79 
80  /* For SLI4 HBAs driver need to allocate memory */
81  mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
82  if (mp)
83  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
84 
85  if (!mp || !mp->virt) {
/* kfree(NULL) is a no-op, so this is safe when kmalloc itself failed */
86  kfree(mp);
/* NOTE(review): the lpfc_printf_log() call opening this message was dropped
 * by extraction (original line 87). */
88  "2605 lpfc_dump_static_vport: memory"
89  " allocation failed\n");
90  return 1;
91  }
92  memset(mp->virt, 0, LPFC_BPL_SIZE);
93  INIT_LIST_HEAD(&mp->list);
94  /* save address for completion */
95  pmb->context1 = (uint8_t *)mp;
/* DMA address of the buffer, split across two mailbox words */
96  mb->un.varWords[3] = putPaddrLow(mp->phys);
97  mb->un.varWords[4] = putPaddrHigh(mp->phys);
98  mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
99 
100  return 0;
101 }
102 
/*
 * Single-command mailbox builder: zeroes the mailbox entry and marks it
 * host-owned.
 * NOTE(review): extraction dropped the signature (original line 111) and the
 * mbxCommand assignment (line 116) -- presumably lpfc_down_link; confirm
 * against the unextracted lpfc_mbox.c.
 */
110 void
112 {
113  MAILBOX_t *mb;
114  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
115  mb = &pmb->u.mb;
117  mb->mbxOwner = OWN_HOST;
118 }
119 
/*
 * Dump-memory mailbox builder (takes an offset and a region_id): prepares a
 * DUMP command for the given NVRAM region.  The caller's context2 pointer is
 * preserved across the memset of the whole mailbox entry.
 * NOTE(review): extraction dropped the signature's first line (original line
 * 133) and the mbxCommand assignment (line 144); consult lpfc_mbox.c.
 */
132 void
134  uint16_t region_id)
135 {
136  MAILBOX_t *mb;
137  void *ctx;
138 
139  mb = &pmb->u.mb;
/* save caller's context2 so it survives the memset below */
140  ctx = pmb->context2;
141 
142  /* Setup to dump VPD region */
143  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
145  mb->un.varDmp.cv = 1;
146  mb->un.varDmp.type = DMP_NV_PARAMS;
147  mb->un.varDmp.entry_index = offset;
148  mb->un.varDmp.region_id = region_id;
149  mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
150  mb->un.varDmp.co = 0;
151  mb->un.varDmp.resp_offset = 0;
/* restore the saved completion context */
152  pmb->context2 = ctx;
153  mb->mbxOwner = OWN_HOST;
154  return;
155 }
156 
/*
 * Dump mailbox builder (presumably lpfc_dump_wakeup_param, given the
 * surrounding functions): prepares a DUMP command at entry index 0,
 * preserving the caller's context2 pointer across the memset.
 * NOTE(review): extraction dropped the signature (original line 166), the
 * mbxCommand assignment (line 177), and the region/word-count setup at
 * lines 182-183; consult the unextracted lpfc_mbox.c.
 */
165 void
167 {
168  MAILBOX_t *mb;
169  void *ctx;
170 
171  mb = &pmb->u.mb;
172  /* Save context so that we can restore after memset */
173  ctx = pmb->context2;
174 
175  /* Setup to dump VPD region */
176  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
178  mb->mbxOwner = OWN_HOST;
179  mb->un.varDmp.cv = 1;
180  mb->un.varDmp.type = DMP_NV_PARAMS;
181  mb->un.varDmp.entry_index = 0;
184  mb->un.varDmp.co = 0;
185  mb->un.varDmp.resp_offset = 0;
/* restore the saved completion context */
186  pmb->context2 = ctx;
187  return;
188 }
189 
201 void
202 lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
203 {
204  MAILBOX_t *mb;
205 
206  mb = &pmb->u.mb;
207  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
208  mb->mbxCommand = MBX_READ_NV;
209  mb->mbxOwner = OWN_HOST;
210  return;
211 }
212 
/*
 * Async-event configuration mailbox builder: zeroes the mailbox, records the
 * ring number that should receive asynchronous events, and marks the entry
 * host-owned.
 * NOTE(review): extraction dropped the signature's first line (original line
 * 227) and the mbxCommand assignment (line 234, MBX_ASYNCEVT_ENABLE in the
 * mainline driver); confirm against lpfc_mbox.c.
 */
226 void
228  uint32_t ring)
229 {
230  MAILBOX_t *mb;
231 
232  mb = &pmb->u.mb;
233  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
235  mb->un.varCfgAsyncEvent.ring = ring;
236  mb->mbxOwner = OWN_HOST;
237  return;
238 }
239 
253 void
254 lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
255 {
256  MAILBOX_t *mb;
257 
258  mb = &pmb->u.mb;
259  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
261  mb->mbxOwner = OWN_HOST;
262  return;
263 }
264 
/*
 * Read-topology mailbox builder: prepares a command whose response (the
 * loop map) lands in the caller-supplied DMA buffer @mp.  The buffer is
 * saved in pmb->context1 for the completion handler.  Always returns 0.
 * NOTE(review): extraction dropped the signature's first line (original
 * line 287) and the mbxCommand assignment (line 298, MBX_READ_TOPOLOGY in
 * the mainline driver); confirm against lpfc_mbox.c.
 */
286 int
288  struct lpfc_dmabuf *mp)
289 {
290  MAILBOX_t *mb;
291  struct lpfc_sli *psli;
292 
293  psli = &phba->sli;
294  mb = &pmb->u.mb;
295  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
296 
297  INIT_LIST_HEAD(&mp->list);
/* 64-bit BDE describing the DMA buffer that receives the loop map */
299  mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
300  mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
301  mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
302 
303  /* Save address for later completion and set the owner to host so that
304  * the FW knows this mailbox is available for processing.
305  */
306  pmb->context1 = (uint8_t *)mp;
307  mb->mbxOwner = OWN_HOST;
308  return (0);
309 }
310 
326 void
327 lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
328 {
329  MAILBOX_t *mb;
330 
331  mb = &pmb->u.mb;
332  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
333 
334  mb->un.varClearLA.eventTag = phba->fc_eventTag;
335  mb->mbxCommand = MBX_CLEAR_LA;
336  mb->mbxOwner = OWN_HOST;
337  return;
338 }
339 
/*
 * Config-link mailbox builder: copies the port's timeout values (EDTOV,
 * RATOV, ...) and, when coalescing is configured, the coalesce response
 * parameters into a CONFIG_LINK mailbox.
 * NOTE(review): extraction dropped the signature (original line 355) and the
 * mbxCommand assignment (line 383, MBX_CONFIG_LINK in the mainline driver).
 */
354 void
356 {
357  struct lpfc_vport *vport = phba->pport;
358  MAILBOX_t *mb = &pmb->u.mb;
359  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
360 
361  /* NEW_FEATURE
362  * SLI-2, Coalescing Response Feature.
363  */
364  if (phba->cfg_cr_delay) {
365  mb->un.varCfgLnk.cr = 1;
366  mb->un.varCfgLnk.ci = 1;
367  mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
368  mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
369  }
370 
/* copy the port identity and FC timeout values into the mailbox */
371  mb->un.varCfgLnk.myId = vport->fc_myDID;
372  mb->un.varCfgLnk.edtov = phba->fc_edtov;
373  mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
374  mb->un.varCfgLnk.ratov = phba->fc_ratov;
375  mb->un.varCfgLnk.rttov = phba->fc_rttov;
376  mb->un.varCfgLnk.altov = phba->fc_altov;
377  mb->un.varCfgLnk.crtov = phba->fc_crtov;
378  mb->un.varCfgLnk.citov = phba->fc_citov;
379 
380  if (phba->cfg_ack0)
381  mb->un.varCfgLnk.ack0_enable = 1;
382 
384  mb->mbxOwner = OWN_HOST;
385  return;
386 }
387 
/*
 * MSI-X configuration mailbox builder: validates that the driver is
 * configured for MSI-X (cfg_use_msi == 2) on an SLI-3+ HBA, then sets up the
 * multi-message attention conditions, the HA-bit-to-message-number mapping,
 * and (disabled) autoclear configuration.  Returns 0 on success, -EINVAL on
 * a configuration mismatch.
 * NOTE(review): extraction dropped the signature (original line 402), the
 * lpfc_printf_log() calls opening the two error messages (lines 409, 416),
 * and the mbxCommand assignment (line 460, MBX_CONFIG_MSI in the mainline
 * driver).
 */
401 int
403 {
404  MAILBOX_t *mb = &pmb->u.mb;
405  uint32_t attentionConditions[2];
406 
407  /* Sanity check */
408  if (phba->cfg_use_msi != 2) {
410  "0475 Not configured for supporting MSI-X "
411  "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
412  return -EINVAL;
413  }
414 
415  if (phba->sli_rev < 3) {
417  "0476 HBA not supporting SLI-3 or later "
418  "SLI Revision: 0x%x\n", phba->sli_rev);
419  return -EINVAL;
420  }
421 
422  /* Clear mailbox command fields */
423  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
424 
425  /*
426  * SLI-3, Message Signaled Interrupt Fearure.
427  */
428 
429  /* Multi-message attention configuration */
430  attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
431  HA_LATT | HA_MBATT);
432  attentionConditions[1] = 0;
433 
434  mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
435  mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
436 
437  /*
438  * Set up message number to HA bit association
439  */
440 #ifdef __BIG_ENDIAN_BITFIELD
441  /* RA0 (FCP Ring) */
442  mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
443  /* RA1 (Other Protocol Extra Ring) */
444  mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
445 #else /* __LITTLE_ENDIAN_BITFIELD */
/* ^3 flips the byte index within a 32-bit word for little-endian hosts */
446  /* RA0 (FCP Ring) */
447  mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
448  /* RA1 (Other Protocol Extra Ring) */
449  mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
450 #endif
451  /* Multi-message interrupt autoclear configuration*/
452  mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
453  mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
454 
455  /* For now, HBA autoclear does not work reliably, disable it */
456  mb->un.varCfgMSI.autoClearHA[0] = 0;
457  mb->un.varCfgMSI.autoClearHA[1] = 0;
458 
459  /* Set command and owner bit */
461  mb->mbxOwner = OWN_HOST;
462 
463  return 0;
464 }
465 
/*
 * lpfc_init_link (signature partially visible at original line 482): builds
 * the INIT_LINK mailbox command, translating the requested @topology and
 * @linkspeed into mailbox flag bits.  Link-speed selection is only honoured
 * when the VPD feature level is >= 0x02.
 * NOTE(review): extraction gutted both switch statements -- every `case`
 * label and flag assignment for topology (original lines 493-508) and link
 * speed (lines 521-547) was a hyperlink and is gone.  Do not edit this block
 * without the unextracted lpfc_mbox.c.
 */
480 void
482  LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
483 {
484  lpfc_vpd_t *vpd;
485  struct lpfc_sli *psli;
486  MAILBOX_t *mb;
487 
488  mb = &pmb->u.mb;
489  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
490 
491  psli = &phba->sli;
/* topology -> link-flag translation; case labels lost in extraction */
492  switch (topology) {
496  break;
499  break;
502  break;
506  break;
507  case FLAGS_LOCAL_LB:
509  break;
510  }
511 
512  /* Enable asynchronous ABTS responses from firmware */
514 
515  /* NEW_FEATURE
516  * Setting up the link speed
517  */
518  vpd = &phba->vpd;
519  if (vpd->rev.feaLevelHigh >= 0x02){
/* linkspeed -> flag translation; case labels lost in extraction */
520  switch(linkspeed){
524  break;
528  break;
532  break;
536  break;
540  break;
544  break;
546  default:
548  break;
549  }
550 
551  }
552  else
554 
555  mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
556  mb->mbxOwner = OWN_HOST;
558  return;
559 }
560 
/*
 * lpfc_read_sparam - Prepare a mailbox command to read the HBA's service
 * parameters into a freshly allocated DMA buffer.  The buffer is stashed in
 * pmb->context1 for the completion handler.  Returns 0 on success, 1 when
 * buffer allocation fails.
 * NOTE(review): extraction dropped the lpfc_printf_log() call opening the
 * "0301" message (original line 602-604) and the mbxCommand assignment
 * (line 609, MBX_READ_SPARM64 in the mainline driver).
 */
582 int
583 lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
584 {
585  struct lpfc_dmabuf *mp;
586  MAILBOX_t *mb;
587  struct lpfc_sli *psli;
588 
589  psli = &phba->sli;
590  mb = &pmb->u.mb;
591  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
592 
593  mb->mbxOwner = OWN_HOST;
594 
595  /* Get a buffer to hold the HBAs Service Parameters */
596 
597  mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
598  if (mp)
599  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
600  if (!mp || !mp->virt) {
/* kfree(NULL) is a no-op, so this is safe when kmalloc itself failed */
601  kfree(mp);
603  /* READ_SPARAM: no buffers */
605  "0301 READ_SPARAM: no buffers\n");
606  return (1);
607  }
608  INIT_LIST_HEAD(&mp->list);
/* 64-bit BDE pointing at the DMA buffer receiving the service params */
610  mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611  mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612  mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
/* SLI3+ needs the driver-side vpi translated to the adapter's vpi */
613  if (phba->sli_rev >= LPFC_SLI_REV3)
614  mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
615 
616  /* save address for completion */
617  pmb->context1 = mp;
618 
619  return (0);
620 }
621 
/*
 * lpfc_unreg_did - Prepare a mailbox command to unregister a D_ID.
 * The vpi is passed through unchanged except on SLI4, where the driver-side
 * vpi index is translated to the adapter's vpi (unless vpi is the 0xffff
 * "all" sentinel).
 * NOTE(review): extraction dropped the mbxCommand assignment (original line
 * 652, MBX_UNREG_D_ID in the mainline driver).
 */
637 void
638 lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
639  LPFC_MBOXQ_t * pmb)
640 {
641  MAILBOX_t *mb;
642 
643  mb = &pmb->u.mb;
644  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
645 
646  mb->un.varUnregDID.did = did;
647  mb->un.varUnregDID.vpi = vpi;
/* 0xffff means "all vpis" and is never translated */
648  if ((vpi != 0xffff) &&
649  (phba->sli_rev == LPFC_SLI_REV4))
650  mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
651 
653  mb->mbxOwner = OWN_HOST;
654  return;
655 }
656 
/*
 * Parameterless mailbox builder: zeroes the mailbox and marks it host-owned.
 * NOTE(review): extraction dropped the signature (original line 671) and the
 * mbxCommand assignment (line 678); by position in lpfc_mbox.c this is
 * likely lpfc_read_config (MBX_READ_CONFIG) -- confirm before editing.
 */
670 void
672 {
673  MAILBOX_t *mb;
674 
675  mb = &pmb->u.mb;
676  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
677 
679  mb->mbxOwner = OWN_HOST;
680  return;
681 }
682 
/*
 * Parameterless mailbox builder: zeroes the mailbox and marks it host-owned.
 * NOTE(review): extraction dropped the signature (original line 696) and the
 * mbxCommand assignment (line 703); by position in lpfc_mbox.c this is
 * likely lpfc_read_lnk_stat (MBX_READ_LNK_STAT) -- confirm before editing.
 */
695 void
697 {
698  MAILBOX_t *mb;
699 
700  mb = &pmb->u.mb;
701  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 
704  mb->mbxOwner = OWN_HOST;
705  return;
706 }
707 
/*
 * lpfc_reg_rpi - Prepare a REG_LOGIN mailbox command to register a remote
 * port.  Translates the driver-side rpi/vpi indices to adapter values where
 * the SLI revision requires it, copies the remote port's service parameters
 * (@param) into a freshly allocated DMA buffer, and describes that buffer
 * with a 64-bit BDE.  Returns 0 on success, 1 on allocation failure.
 * NOTE(review): extraction dropped the mbxCommand assignment (original line
 * 755), the lpfc_printf_log() call opening the "0302" message (line 757),
 * and the bdeFlags setup (line 771); consult lpfc_mbox.c.
 */
732 int
733 lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
734  uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
735 {
736  MAILBOX_t *mb = &pmb->u.mb;
737  uint8_t *sparam;
738  struct lpfc_dmabuf *mp;
739 
740  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
741 
/* SLI4 translates the rpi index; older revisions leave rpi at 0 here */
742  mb->un.varRegLogin.rpi = 0;
743  if (phba->sli_rev == LPFC_SLI_REV4)
744  mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
745  if (phba->sli_rev >= LPFC_SLI_REV3)
746  mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
747  mb->un.varRegLogin.did = did;
748  mb->mbxOwner = OWN_HOST;
749  /* Get a buffer to hold NPorts Service Parameters */
750  mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
751  if (mp)
752  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
753  if (!mp || !mp->virt) {
754  kfree(mp);
756  /* REG_LOGIN: no buffers */
758  "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
759  "rpi x%x\n", vpi, did, rpi);
760  return 1;
761  }
762  INIT_LIST_HEAD(&mp->list);
763  sparam = mp->virt;
764 
765  /* Copy param's into a new buffer */
766  memcpy(sparam, param, sizeof (struct serv_parm));
767 
768  /* save address for completion */
769  pmb->context1 = (uint8_t *) mp;
770 
/* 64-bit BDE describing the service-parameter DMA buffer */
772  mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
773  mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
774  mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
775 
776  return 0;
777 }
778 
/*
 * lpfc_unreg_login - Prepare an UNREG_LOGIN mailbox command for the given
 * rpi, translating the driver-side vpi index on SLI3+.
 * NOTE(review): extraction dropped the signature's first line (original line
 * 797) and the mbxCommand assignment (line 810, MBX_UNREG_LOGIN in the
 * mainline driver).
 */
796 void
798  LPFC_MBOXQ_t * pmb)
799 {
800  MAILBOX_t *mb;
801 
802  mb = &pmb->u.mb;
803  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
804 
805  mb->un.varUnregLogin.rpi = rpi;
806  mb->un.varUnregLogin.rsvd1 = 0;
807  if (phba->sli_rev >= LPFC_SLI_REV3)
808  mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
809 
811  mb->mbxOwner = OWN_HOST;
812 
813  return;
814 }
815 
/*
 * lpfc_sli4_unreg_all_rpis - Issue an UNREG_LOGIN that unregisters every
 * rpi on the vport.  Allocates a mailbox from the driver pool, builds an
 * unreg-login with rpi 0 (SLI3 compatibility) and rsvd1 0x4000 (the SLI4
 * "unreg all" indicator), and fires it without waiting.  The mailbox is
 * returned to the pool if the issue fails; allocation failure is silently
 * ignored (best-effort cleanup path).
 * NOTE(review): extraction dropped the signature (original line 824), the
 * LPFC_MBOXQ_t *mbox declaration (line 827), and the mbox_cmpl assignment
 * (line 843); consult lpfc_mbox.c.
 */
823 void
825 {
826  struct lpfc_hba *phba = vport->phba;
828  int rc;
829 
830  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
831  if (mbox) {
832  /*
833  * For SLI4 functions, the rpi field is overloaded for
834  * the vport context unreg all. This routine passes
835  * 0 for the rpi field in lpfc_unreg_login for compatibility
836  * with SLI3 and then overrides the rpi field with the
837  * expected value for SLI4.
838  */
839  lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
840  mbox);
/* 0x4000 in rsvd1 tells SLI4 firmware to unregister ALL rpis */
841  mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
842  mbox->vport = vport;
844  mbox->context1 = NULL;
845  rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
846  if (rc == MBX_NOT_FINISHED)
847  mempool_free(mbox, phba->mbox_mem_pool);
848  }
849 }
850 
/*
 * lpfc_reg_vpi - Prepare a REG_VPI mailbox command to register the vport's
 * virtual port identifier, source ID, vfi, and WWPN with the firmware.
 * On SLI4 re-registration (no pending REG_VPI flag) the "upd" bit asks the
 * firmware to update the MAC address.
 * NOTE(review): extraction dropped the signature (original line 867);
 * consult lpfc_mbox.c for the parameter list (vport, pmb).
 */
866 void
868 {
869  MAILBOX_t *mb = &pmb->u.mb;
870  struct lpfc_hba *phba = vport->phba;
871 
872  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
873  /*
874  * Set the re-reg VPI bit for f/w to update the MAC address.
875  */
876  if ((phba->sli_rev == LPFC_SLI_REV4) &&
877  !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
878  mb->un.varRegVpi.upd = 1;
879 
880  mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
881  mb->un.varRegVpi.sid = vport->fc_myDID;
882  if (phba->sli_rev == LPFC_SLI_REV4)
883  mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
884  else
885  mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
/* WWPN goes into the mailbox little-endian, word by word */
886  memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
887  sizeof(struct lpfc_name));
888  mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
889  mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
890 
891  mb->mbxCommand = MBX_REG_VPI;
892  mb->mbxOwner = OWN_HOST;
893  return;
894 
895 }
896 
/*
 * lpfc_unreg_vpi - Prepare an UNREG_VPI mailbox command; the translated vpi
 * lands in a revision-specific field (vpi for SLI3, sli4_vpi for SLI4+).
 * NOTE(review): extraction dropped the signature (original line 914) and the
 * mbxCommand assignment (line 924, MBX_UNREG_VPI in the mainline driver).
 */
913 void
915 {
916  MAILBOX_t *mb = &pmb->u.mb;
917  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
918 
919  if (phba->sli_rev == LPFC_SLI_REV3)
920  mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
921  else if (phba->sli_rev >= LPFC_SLI_REV4)
922  mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
923 
925  mb->mbxOwner = OWN_HOST;
926  return;
927 
928 }
929 
/*
 * lpfc_config_pcb_setup - Fill in the Port Control Block's ring descriptors.
 * For each SLI ring, records the command/response entry counts and the DMA
 * addresses of the command and response IOCB arrays (computed as offsets
 * into the slim2p region).  Rings without both command and response entries
 * are zeroed out and skipped.
 * NOTE(review): extraction dropped the `offset` variable declaration
 * (original line 944) and the SLI2 IOCB size constants on the ?: lines
 * (lines 955 and 958); consult lpfc_mbox.c before editing.
 */
937 static void
938 lpfc_config_pcb_setup(struct lpfc_hba * phba)
939 {
940  struct lpfc_sli *psli = &phba->sli;
941  struct lpfc_sli_ring *pring;
942  PCB_t *pcbp = phba->pcb;
943  dma_addr_t pdma_addr;
945  uint32_t iocbCnt = 0;
946  int i;
947 
948  pcbp->maxRing = (psli->num_rings - 1);
949 
950  for (i = 0; i < psli->num_rings; i++) {
951  pring = &psli->ring[i];
952 
/* IOCB entry sizes depend on the SLI revision (SLI3 vs older) */
953  pring->sli.sli3.sizeCiocb =
954  phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
956  pring->sli.sli3.sizeRiocb =
957  phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
959  /* A ring MUST have both cmd and rsp entries defined to be
960  valid */
961  if ((pring->sli.sli3.numCiocb == 0) ||
962  (pring->sli.sli3.numRiocb == 0)) {
963  pcbp->rdsc[i].cmdEntries = 0;
964  pcbp->rdsc[i].rspEntries = 0;
965  pcbp->rdsc[i].cmdAddrHigh = 0;
966  pcbp->rdsc[i].rspAddrHigh = 0;
967  pcbp->rdsc[i].cmdAddrLow = 0;
968  pcbp->rdsc[i].rspAddrLow = 0;
969  pring->sli.sli3.cmdringaddr = NULL;
970  pring->sli.sli3.rspringaddr = NULL;
971  continue;
972  }
973  /* Command ring setup for ring */
974  pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
975  pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
976 
/* DMA address = slim2p base + byte offset of this ring's IOCB array */
977  offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
978  (uint8_t *) phba->slim2p.virt;
979  pdma_addr = phba->slim2p.phys + offset;
980  pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
981  pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
982  iocbCnt += pring->sli.sli3.numCiocb;
983 
984  /* Response ring setup for ring */
985  pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
986 
987  pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
988  offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
989  (uint8_t *)phba->slim2p.virt;
990  pdma_addr = phba->slim2p.phys + offset;
991  pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
992  pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
993  iocbCnt += pring->sli.sli3.numRiocb;
994  }
995 }
996 
1011 void
1012 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1013 {
1014  MAILBOX_t *mb = &pmb->u.mb;
1015  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1016  mb->un.varRdRev.cv = 1;
1017  mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
1018  mb->mbxCommand = MBX_READ_REV;
1019  mb->mbxOwner = OWN_HOST;
1020  return;
1021 }
1022 
/*
 * Byte-swap helper for SLI4 READ_REV responses: for an MBX_READ_REV
 * completion, runs the firmware name strings through
 * lpfc_sli_pcimem_bcopy() in place (source == destination) to fix their
 * byte order; all other commands are left untouched.
 * NOTE(review): extraction dropped the signature (original line 1024);
 * in the mainline driver this is lpfc_sli4_swap_str(phba, pmb).
 */
1023 void
1025 {
1026  MAILBOX_t *mb = &pmb->u.mb;
1027  struct lpfc_mqe *mqe;
1028 
1029  switch (mb->mbxCommand) {
1030  case MBX_READ_REV:
1031  mqe = &pmb->u.mqe;
/* in-place swap: source and destination are the same buffer */
1032  lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
1033  mqe->un.read_rev.fw_name, 16);
1034  lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
1035  mqe->un.read_rev.ulp_fw_name, 16);
1036  break;
1037  default:
1038  break;
1039  }
1040  return;
1041 }
1042 
1053 static void
1054 lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1055  struct lpfc_hbq_init *hbq_desc)
1056 {
1057  hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1058  hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1059  hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1060 }
1061 
1072 static void
1073 lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1074  struct lpfc_hbq_init *hbq_desc)
1075 {
1076  hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1077  hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1078  hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1079  hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1080  memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1081  sizeof(hbqmb->profiles.profile3.cmdmatch));
1082 }
1083 
1095 static void
1096 lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1097  struct lpfc_hbq_init *hbq_desc)
1098 {
1099  hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1100  hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1101  hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1102  hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1103  memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1104  sizeof(hbqmb->profiles.profile5.cmdmatch));
1105 }
1106 
/*
 * lpfc_config_hbq - Build a CONFIG_HBQ mailbox command from an HBQ init
 * descriptor: copies the entry count, notification/profile/ring-mask fields,
 * computes the HBQ's DMA address within the hbqslimp region, fills in the
 * profile-specific area (profiles 2, 3, 5), and copies any R_CTL/TYPE masks.
 * NOTE(review): extraction dropped the signature's first line (original line
 * 1122); in the mainline driver the parameters are (phba, id, hbq_desc,
 * hbq_entry_index, pmb).
 */
1121 void
1123  struct lpfc_hbq_init *hbq_desc,
1124  uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
1125 {
1126  int i;
1127  MAILBOX_t *mb = &pmb->u.mb;
1128  struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
1129 
1130  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1131  hbqmb->hbqId = id;
1132  hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
1133  hbqmb->recvNotify = hbq_desc->rn; /* Receive
1134  * Notification */
1135  hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
1136  * # in words 0-19 */
1137  hbqmb->profile = hbq_desc->profile; /* Selection profile:
1138  * 0 = all,
1139  * 7 = logentry */
1140  hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
1141  * e.g. Ring0=b0001,
1142  * ring2=b0100 */
1143  hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
1144  * or 5 */
1145  hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
1146  * HBQ will be used
1147  * for LogEntry
1148  * buffers */
/* DMA address of this HBQ's entries within the hbqslimp region */
1149  hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
1150  hbq_entry_index * sizeof(struct lpfc_hbq_entry);
1151  hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
1152 
1153  mb->mbxCommand = MBX_CONFIG_HBQ;
1154  mb->mbxOwner = OWN_HOST;
1155 
1156  /* Copy info for profiles 2,3,5. Other
1157  * profiles this area is reserved
1158  */
1159  if (hbq_desc->profile == 2)
1160  lpfc_build_hbq_profile2(hbqmb, hbq_desc);
1161  else if (hbq_desc->profile == 3)
1162  lpfc_build_hbq_profile3(hbqmb, hbq_desc);
1163  else if (hbq_desc->profile == 5)
1164  lpfc_build_hbq_profile5(hbqmb, hbq_desc);
1165 
1166  /* Return if no rctl / type masks for this HBQ */
1167  if (!hbq_desc->mask_count)
1168  return;
1169 
1170  /* Otherwise we setup specific rctl / type masks for this HBQ */
1171  for (i = 0; i < hbq_desc->mask_count; i++) {
1172  hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
1173  hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
1174  hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
1175  hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
1176  }
1177 
1178  return;
1179 }
1180 
/*
 * lpfc_config_ring - Build a CONFIG_RING mailbox command for SLI ring
 * @ring: either selects the ring's single profile, or copies its per-entry
 * R_CTL/TYPE match masks.  ELS requests get an 0xfe rctl mask so that both
 * request and reply R_CTL values match.
 * NOTE(review): extraction dropped the mbxCommand assignment (original line
 * 1216, MBX_CONFIG_RING in the mainline driver).
 */
1198 void
1199 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1200 {
1201  int i;
1202  MAILBOX_t *mb = &pmb->u.mb;
1203  struct lpfc_sli *psli;
1204  struct lpfc_sli_ring *pring;
1205 
1206  memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1207 
1208  mb->un.varCfgRing.ring = ring;
1209  mb->un.varCfgRing.maxOrigXchg = 0;
1210  mb->un.varCfgRing.maxRespXchg = 0;
1211  mb->un.varCfgRing.recvNotify = 1;
1212 
1213  psli = &phba->sli;
1214  pring = &psli->ring[ring];
1215  mb->un.varCfgRing.numMask = pring->num_mask;
1217  mb->mbxOwner = OWN_HOST;
1218 
1219  /* Is this ring configured for a specific profile */
1220  if (pring->prt[0].profile) {
1221  mb->un.varCfgRing.profile = pring->prt[0].profile;
1222  return;
1223  }
1224 
1225  /* Otherwise we setup specific rctl / type masks for this ring */
1226  for (i = 0; i < pring->num_mask; i++) {
1227  mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
/* 0xfe lets ELS request and reply R_CTL values both match */
1228  if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1229  mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1230  else
1231  mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
1232  mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
1233  mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
1234  }
1235 
1236  return;
1237 }
1238 
/*
 * lpfc_config_port - Build the CONFIG_PORT mailbox command and initialize
 * the Port Control Block (PCB).  Sets up SLI-3 features when supported
 * (BlockGuard, security, HBQs, NPIV), computes DMA addresses for the
 * mailbox and host/port group pointers (in host memory or SLIM depending on
 * configuration), configures the PCB ring descriptors via
 * lpfc_config_pcb_setup(), and finally byte-swaps the PCB for the hardware.
 * NOTE(review): extraction dropped the function signature (original line
 * 1254); in the mainline driver this is lpfc_config_port(phba, pmb).
 * This routine's statement order is firmware-sensitive; left byte-identical.
 */
1253 void
1255 {
1256  MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1257  MAILBOX_t *mb = &pmb->u.mb;
1258  dma_addr_t pdma_addr;
1259  uint32_t bar_low, bar_high;
1260  size_t offset;
1261  struct lpfc_hgp hgp;
1262  int i;
1263  uint32_t pgp_offset;
1264 
1265  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1266  mb->mbxCommand = MBX_CONFIG_PORT;
1267  mb->mbxOwner = OWN_HOST;
1268 
1269  mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
1270 
/* PCB's DMA address = slim2p base + PCB's byte offset within the region */
1271  offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
1272  pdma_addr = phba->slim2p.phys + offset;
1273  mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
1274  mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
1275 
1276  /* Always Host Group Pointer is in SLIM */
1277  mb->un.varCfgPort.hps = 1;
1278 
1279  /* If HBA supports SLI=3 ask for it */
1280 
1281  if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1282  if (phba->cfg_enable_bg)
1283  mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1284  if (phba->cfg_enable_dss)
1285  mb->un.varCfgPort.cdss = 1; /* Configure Security */
1286  mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1287  mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1288  mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1289  if (phba->max_vpi && phba->cfg_enable_npiv &&
1290  phba->vpd.sli3Feat.cmv) {
1291  mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1292  mb->un.varCfgPort.cmv = 1;
1293  } else
1294  mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1295  } else
/* SLI3 unsupported: fall back to SLI2 for the rest of driver operation */
1296  phba->sli_rev = LPFC_SLI_REV2;
1297  mb->un.varCfgPort.sli_mode = phba->sli_rev;
1298 
1299  /* If this is an SLI3 port, configure async status notification. */
1300  if (phba->sli_rev == LPFC_SLI_REV3)
1301  mb->un.varCfgPort.casabt = 1;
1302 
1303  /* Now setup pcb */
1304  phba->pcb->type = TYPE_NATIVE_SLI2;
1305  phba->pcb->feature = FEATURE_INITIAL_SLI2;
1306 
1307  /* Setup Mailbox pointers */
1308  phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
1309  offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
1310  pdma_addr = phba->slim2p.phys + offset;
1311  phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
1312  phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
1313 
1314  /*
1315  * Setup Host Group ring pointer.
1316  *
1317  * For efficiency reasons, the ring get/put pointers can be
1318  * placed in adapter memory (SLIM) rather than in host memory.
1319  * This allows firmware to avoid PCI reads/writes when updating
1320  * and checking pointers.
1321  *
1322  * The firmware recognizes the use of SLIM memory by comparing
1323  * the address of the get/put pointers structure with that of
1324  * the SLIM BAR (BAR0).
1325  *
1326  * Caution: be sure to use the PCI config space value of BAR0/BAR1
1327  * (the hardware's view of the base address), not the OS's
1328  * value of pci_resource_start() as the OS value may be a cookie
1329  * for ioremap/iomap.
1330  */
1331 
1332 
1333  pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
1334  pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
1335 
1336  /*
1337  * Set up HGP - Port Memory
1338  *
1339  * The port expects the host get/put pointers to reside in memory
1340  * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
1341  * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
1342  * words (0x40 bytes). This area is not reserved if HBQs are
1343  * configured in SLI-3.
1344  *
1345  * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
1346  * RR0Get 0xc4 0x84
1347  * CR1Put 0xc8 0x88
1348  * RR1Get 0xcc 0x8c
1349  * CR2Put 0xd0 0x90
1350  * RR2Get 0xd4 0x94
1351  * CR3Put 0xd8 0x98
1352  * RR3Get 0xdc 0x9c
1353  *
1354  * Reserved 0xa0-0xbf
1355  * If HBQs configured:
1356  * HBQ 0 Put ptr 0xc0
1357  * HBQ 1 Put ptr 0xc4
1358  * HBQ 2 Put ptr 0xc8
1359  * ......
1360  * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
1361  *
1362  */
1363 
1364  if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
/* host-memory HGP: point at the host[] array inside the slim2p mailbox */
1365  phba->host_gp = &phba->mbox->us.s2.host[0];
1366  phba->hbq_put = NULL;
1367  offset = (uint8_t *)&phba->mbox->us.s2.host -
1368  (uint8_t *)phba->slim2p.virt;
1369  pdma_addr = phba->slim2p.phys + offset;
1370  phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
1371  phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
1372  } else {
1373  /* Always Host Group Pointer is in SLIM */
1374  mb->un.varCfgPort.hps = 1;
1375 
1376  if (phba->sli_rev == 3) {
1377  phba->host_gp = &mb_slim->us.s3.host[0];
1378  phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
1379  } else {
1380  phba->host_gp = &mb_slim->us.s2.host[0];
1381  phba->hbq_put = NULL;
1382  }
1383 
1384  /* mask off BAR0's flag bits 0 - 3 */
1385  phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
1386  (void __iomem *)phba->host_gp -
1387  (void __iomem *)phba->MBslimaddr;
1388  if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
1389  phba->pcb->hgpAddrHigh = bar_high;
1390  else
1391  phba->pcb->hgpAddrHigh = 0;
1392  /* write HGP data to SLIM at the required longword offset */
1393  memset(&hgp, 0, sizeof(struct lpfc_hgp));
1394 
1395  for (i = 0; i < phba->sli.num_rings; i++) {
1396  lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
1397  sizeof(*phba->host_gp));
1398  }
1399  }
1400 
1401  /* Setup Port Group offset */
1402  if (phba->sli_rev == 3)
1403  pgp_offset = offsetof(struct lpfc_sli2_slim,
1404  mbx.us.s3_pgp.port);
1405  else
1406  pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
1407  pdma_addr = phba->slim2p.phys + pgp_offset;
1408  phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
1409  phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
1410 
1411  /* Use callback routine to setp rings in the pcb */
1412  lpfc_config_pcb_setup(phba);
1413 
1414  /* special handling for LC HBAs */
1415  if (lpfc_is_LC_HBA(phba->pcidev->device)) {
1416  uint32_t hbainit[5];
1417 
1418  lpfc_hba_init(phba, hbainit);
1419 
1420  memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
1421  }
1422 
1423  /* Swap PCB if needed */
1424  lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
1425 }
1426 
/*
 * lpfc_kill_board - Prepare a KILL_BOARD mailbox command (shuts the HBA
 * down); zeroes the mailbox, sets the command word, and marks it host-owned.
 * NOTE(review): extraction dropped the signature (original line 1443); in
 * the mainline driver the parameters are (phba, pmb).
 */
1442 void
1444 {
1445  MAILBOX_t *mb = &pmb->u.mb;
1446 
1447  memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1448  mb->mbxCommand = MBX_KILL_BOARD;
1449  mb->mbxOwner = OWN_HOST;
1450  return;
1451 }
1452 
1463 void
1464 lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1465 {
1466  struct lpfc_sli *psli;
1467 
1468  psli = &phba->sli;
1469 
1470  list_add_tail(&mbq->list, &psli->mboxq);
1471 
1472  psli->mboxq_cnt++;
1473 
1474  return;
1475 }
1476 
1491 LPFC_MBOXQ_t *
1492 lpfc_mbox_get(struct lpfc_hba * phba)
1493 {
1494  LPFC_MBOXQ_t *mbq = NULL;
1495  struct lpfc_sli *psli = &phba->sli;
1496 
1497  list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
1498  if (mbq)
1499  psli->mboxq_cnt--;
1500 
1501  return mbq;
1502 }
1503 
/*
 * Lock-free variant of the mailbox-completion enqueue: appends the mailbox
 * to the driver's completion list.  Caller must hold the appropriate lock
 * (see the locked wrapper below it in the original file).
 * NOTE(review): extraction dropped the signature (original line 1515); in
 * the mainline driver this is __lpfc_mbox_cmpl_put(phba, mbq).
 */
1514 void
1516 {
1517  list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1518 }
1519 
/*
 * Locked mailbox-completion enqueue: takes hbalock (irqsave, as this is
 * expected to run from interrupt context) around the lock-free helper.
 * NOTE(review): extraction dropped the signature (original line 1531); in
 * the mainline driver this is lpfc_mbox_cmpl_put(phba, mbq).
 */
1530 void
1532 {
1533  unsigned long iflag;
1534 
1535  /* This function expects to be called from interrupt context */
1536  spin_lock_irqsave(&phba->hbalock, iflag);
1537  __lpfc_mbox_cmpl_put(phba, mbq);
1538  spin_unlock_irqrestore(&phba->hbalock, iflag);
1539  return;
1540 }
1541 
/*
 * Mailbox-command sanity check: any command with a custom completion
 * handler (other than the default and wake handlers) must carry a vport;
 * returns -ENODEV (and dumps a stack trace) when it does not, 0 otherwise.
 * NOTE(review): extraction dropped the signature (original line 1554) and
 * the lpfc_printf_log() call opening the "1814" message (line 1562); in
 * the mainline driver this is lpfc_mbox_cmd_check(phba, mboxq).
 */
1553 int
1555 {
1556  /* Mailbox command that have a completion handler must also have a
1557  * vport specified.
1558  */
1559  if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1560  mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1561  if (!mboxq->vport) {
1563  "1814 Mbox x%x failed, no vport\n",
1564  mboxq->u.mb.mbxCommand);
1565  dump_stack();
1566  return -ENODEV;
1567  }
1568  }
1569  return 0;
1570 }
1571 
/*
 * Device-state check before issuing a mailbox command: refuses (-ENODEV)
 * when the PCI channel is offline or the HBA is in the error state;
 * returns 0 when it is safe to proceed.
 * NOTE(review): extraction dropped the signature (original line 1583); in
 * the mainline driver this is lpfc_mbox_dev_check(phba).
 */
1582 int
1584 {
1585  /* If the PCI channel is in offline state, do not issue mbox */
1586  if (unlikely(pci_channel_offline(phba->pcidev)))
1587  return -ENODEV;
1588 
1589  /* If the HBA is in error state, do not issue mbox */
1590  if (phba->link_state == LPFC_HBA_ERROR)
1591  return -ENODEV;
1592 
1593  return 0;
1594 }
1595 
/*
 * Mailbox timeout selection: flash-related commands get the longer
 * LPFC_MBOX_TMO_FLASH_CMD timeout; SLI4_CONFIG commands are further
 * dispatched on their subsystem/opcode; everything else gets the default
 * LPFC_MBOX_TMO.
 * NOTE(review): extraction dropped the signature (original line 1608) and
 * gutted the SLI4_CONFIG opcode switches -- the case labels and their
 * return values (original lines 1630-1645, 1650-1651, 1654) are gone.
 * Do not edit without the unextracted lpfc_mbox.c.
 */
1607 int
1609 {
1610  MAILBOX_t *mbox = &mboxq->u.mb;
1611  uint8_t subsys, opcode;
1612 
1613  switch (mbox->mbxCommand) {
1614  case MBX_WRITE_NV: /* 0x03 */
1615  case MBX_DUMP_MEMORY: /* 0x17 */
1616  case MBX_UPDATE_CFG: /* 0x1B */
1617  case MBX_DOWN_LOAD: /* 0x1C */
1618  case MBX_DEL_LD_ENTRY: /* 0x1D */
1619  case MBX_WRITE_VPARMS: /* 0x32 */
1620  case MBX_LOAD_AREA: /* 0x81 */
1621  case MBX_WRITE_WWN: /* 0x98 */
1622  case MBX_LOAD_EXP_ROM: /* 0x9C */
1623  case MBX_ACCESS_VDATA: /* 0xA5 */
1624  return LPFC_MBOX_TMO_FLASH_CMD;
1625  case MBX_SLI4_CONFIG: /* 0x9b */
1626  subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
1627  opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
1628  if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
/* opcode cases lost in extraction */
1629  switch (opcode) {
1646  }
1647  }
1648  if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
/* opcode cases lost in extraction */
1649  switch (opcode) {
1652  }
1653  }
1655  }
1656  return LPFC_MBOX_TMO;
1657 }
1658 
/*
 * SGE setter for non-embedded SLI4 mailbox commands: writes the split
 * physical address and length of one scatter-gather entry into the
 * mailbox's nembed_cmd area at index @sgentry.
 * NOTE(review): extraction dropped the signature (original lines 1670-1671);
 * in the mainline driver this is lpfc_sli4_mbx_sge_set(mbox, sgentry,
 * phyaddr, length).
 */
1669 void
1672 {
1673  struct lpfc_mbx_nembed_cmd *nembed_sge;
1674 
1675  nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1676  &mbox->u.mqe.un.nembed_cmd;
1677  nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1678  nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1679  nembed_sge->sge[sgentry].length = length;
1680 }
1681 
/*
 * SGE getter for non-embedded SLI4 mailbox commands: copies entry @sgentry
 * of the mailbox's nembed_cmd scatter-gather array into the caller's @sge.
 * NOTE(review): extraction dropped the signature's first line (original
 * line 1691); in the mainline driver the leading parameters are
 * (mbox, sgentry, ...).
 */
1690 void
1692  struct lpfc_mbx_sge *sge)
1693 {
1694  struct lpfc_mbx_nembed_cmd *nembed_sge;
1695 
1696  nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1697  &mbox->u.mqe.un.nembed_cmd;
1698  sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1699  sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1700  sge->length = nembed_sge->sge[sgentry].length;
1701 }
1702 
/**
 * Free a SLI4 mailbox command and, for the non-embedded form, every
 * DMA page referenced by its SGE list plus the virtual-address array.
 *
 * NOTE(review): this extraction is missing the signature line
 * (orig. 1711), the `dma_addr_t phyaddr;` declaration (orig. 1715) and
 * the first line of the dma_free_coherent() call (orig. 1737); confirm
 * against the full source.
 *
 * Ownership: the mailbox always goes back to phba->mbox_mem_pool, on
 * every path out of this function.
 */
1710 void
1712 {
1713  struct lpfc_mbx_sli4_config *sli4_cfg;
1714  struct lpfc_mbx_sge sge;
1716  uint32_t sgecount, sgentry;
1717 
1718  sli4_cfg = &mbox->u.mqe.un.sli4_config;
1719 
1720  /* For embedded mbox command, just free the mbox command */
1721  if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1722  mempool_free(mbox, phba->mbox_mem_pool);
1723  return;
1724  }
1725 
1726  /* For non-embedded mbox command, we need to free the pages first */
1727  sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1728  /* There is nothing we can do if there is no sge address array */
1729  if (unlikely(!mbox->sge_array)) {
1730  mempool_free(mbox, phba->mbox_mem_pool);
1731  return;
1732  }
1733  /* Each non-embedded DMA memory was allocated in the length of a page */
1734  for (sgentry = 0; sgentry < sgecount; sgentry++) {
1735  lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1736  phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
/* NOTE(review): dma_free_coherent(...) call line lost in extraction */
1738  mbox->sge_array->addr[sgentry], phyaddr);
1739  }
1740  /* Free the sge address array memory */
1741  kfree(mbox->sge_array);
1742  /* Finally, free the mailbox command itself */
1743  mempool_free(mbox, phba->mbox_mem_pool);
1744 }
1745 
/**
 * lpfc_sli4_config - Initialize a SLI4_CONFIG mailbox command
 * @phba: HBA context.
 * @mbox: mailbox to initialize (zeroed here).
 * @subsystem: SLI4 config subsystem (COMMON/FCOE/...).
 * @opcode: subsystem opcode.
 * @length: total payload length requested, including the config header.
 * @emb: true for the embedded form, false for the non-embedded
 *       (external DMA pages) form.
 *
 * Return: @length for the embedded form; for the non-embedded form the
 * number of payload bytes actually backed by allocated DMA pages (which
 * may be less than @length if a page allocation fails, and 0 if the SGE
 * virtual-address array cannot be allocated).
 *
 * NOTE(review): this extraction is missing the `dma_addr_t phyaddr;`
 * declaration (orig. 1770) and the lpfc_printf_log() call line
 * (orig. 1802) in the sge_array failure branch; confirm against the
 * full source.
 */
1760 int
1761 lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1762  uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1763 {
1764  struct lpfc_mbx_sli4_config *sli4_config;
1765  union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1766  uint32_t alloc_len;
1767  uint32_t resid_len;
1768  uint32_t pagen, pcount;
1769  void *viraddr;
1771 
1772  /* Set up SLI4 mailbox command header fields */
1773  memset(mbox, 0, sizeof(*mbox));
1774  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1775 
1776  /* Set up SLI4 ioctl command header fields */
1777  sli4_config = &mbox->u.mqe.un.sli4_config;
1778 
1779  /* Setup for the embedded mbox command */
1780  if (emb) {
1781  /* Set up main header fields */
1782  bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1783  sli4_config->header.cfg_mhdr.payload_length = length;
1784  /* Set up sub-header fields following main header */
1785  bf_set(lpfc_mbox_hdr_opcode,
1786  &sli4_config->header.cfg_shdr.request, opcode);
1787  bf_set(lpfc_mbox_hdr_subsystem,
1788  &sli4_config->header.cfg_shdr.request, subsystem);
1789  sli4_config->header.cfg_shdr.request.request_length =
1790  length - LPFC_MBX_CMD_HDR_LENGTH;
1791  return length;
1792  }
1793 
1794  /* Setup for the non-embedded mbox command */
1795  pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1796  pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1797  LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1798  /* Allocate record for keeping SGE virtual addresses */
1799  mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1800  GFP_KERNEL);
1801  if (!mbox->sge_array) {
1803  "2527 Failed to allocate non-embedded SGE "
1804  "array.\n");
1805  return 0;
1806  }
1807  for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1808  /* The DMA memory is always allocated in the length of a
1809  * page even though the last SGE might not fill up to a
1810  * page, this is used as a priori size of SLI4_PAGE_SIZE for
1811  * the later DMA memory free.
1812  */
1813  viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1814  &phyaddr, GFP_KERNEL)
1815  /* In case of malloc fails, proceed with whatever we have */
1816  if (!viraddr)
1817  break;
1818  memset(viraddr, 0, SLI4_PAGE_SIZE);
1819  mbox->sge_array->addr[pagen] = viraddr;
1820  /* Keep the first page for later sub-header construction */
1821  if (pagen == 0)
1822  cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1823  resid_len = length - alloc_len;
1824  if (resid_len > SLI4_PAGE_SIZE) {
1825  lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1826  SLI4_PAGE_SIZE);
1827  alloc_len += SLI4_PAGE_SIZE;
1828  } else {
1829  lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1830  resid_len);
1831  alloc_len = length;
1832  }
1833  }
1834 
1835  /* Set up main header fields in mailbox command */
1836  sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1837  bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1838 
1839  /* Set up sub-header fields into the first page */
1840  if (pagen > 0) {
1841  bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1842  bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1843  cfg_shdr->request.request_length =
1844  alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1845  }
1846  /* The sub-header is in DMA memory, which needs endian conversion */
1847  if (cfg_shdr)
1848  lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1849  sizeof(union lpfc_sli4_cfg_shdr));
1850  return alloc_len;
1851 }
1852 
/**
 * Complete the per-opcode initialization of a resource-extent mailbox
 * (alloc/dealloc/get extent info) after lpfc_sli4_config() has set up
 * the common header.
 *
 * NOTE(review): this extraction is missing the second signature line
 * (orig. 1869), the opcode case labels (orig. 1904-1905, 1914-1916)
 * and the lpfc_printf_log() line (orig. 1920) in the default branch;
 * confirm against the full source. The visible bf_set of the extent
 * count belongs to the ALLOC_RSRC_EXTENT case.
 *
 * Return: 0 on success, 1 when the first non-embedded SGE page is
 * missing or the opcode is unsupported.
 */
1868 int
1870  uint16_t exts_count, uint16_t rsrc_type, bool emb)
1871 {
1872  uint8_t opcode = 0;
1873  struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1874  void *virtaddr = NULL;
1875 
1876  /* Set up SLI4 ioctl command header fields */
1877  if (emb == LPFC_SLI4_MBX_NEMBED) {
1878  /* Get the first SGE entry from the non-embedded DMA memory */
1879  virtaddr = mbox->sge_array->addr[0];
1880  if (virtaddr == NULL)
1881  return 1;
1882  n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1883  }
1884 
1885  /*
1886  * The resource type is common to all extent Opcodes and resides in the
1887  * same position.
1888  */
1889  if (emb == LPFC_SLI4_MBX_EMBED)
1890  bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1891  &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1892  rsrc_type);
1893  else {
1894  /* This is DMA data. Byteswap is required. */
1895  bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1896  n_rsrc_extnt, rsrc_type);
1897  lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1898  &n_rsrc_extnt->word4,
1899  sizeof(uint32_t));
1900  }
1901 
1902  /* Complete the initialization for the particular Opcode. */
1903  opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
1904  switch (opcode) {
/* NOTE(review): case labels for the extent opcodes lost in extraction */
1906  if (emb == LPFC_SLI4_MBX_EMBED)
1907  bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1908  &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1909  exts_count);
1910  else
1911  bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1912  n_rsrc_extnt, exts_count);
1913  break;
1917  /* Initialization is complete.*/
1918  break;
1919  default:
1921  "2929 Resource Extent Opcode x%x is "
1922  "unsupported\n", opcode);
1923  return 1;
1924  }
1925 
1926  return 0;
1927 }
1928 
/**
 * Extract the subsystem field from a SLI4_CONFIG mailbox command,
 * handling both the embedded and non-embedded (first DMA page) forms.
 *
 * NOTE(review): the definition line (orig. 1940) is missing from this
 * extraction — presumably lpfc_sli_config_mbox_subsys_get(); confirm
 * against the full source.
 *
 * Return: the subsystem code, or LPFC_MBOX_SUBSYSTEM_NA when the
 * command is not SLI4_CONFIG or no SGE array is present.
 */
1939 uint8_t
1941 {
1942  struct lpfc_mbx_sli4_config *sli4_cfg;
1943  union lpfc_sli4_cfg_shdr *cfg_shdr;
1944 
1945  if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1946  return LPFC_MBOX_SUBSYSTEM_NA;
1947  sli4_cfg = &mbox->u.mqe.un.sli4_config;
1948 
1949  /* For embedded mbox command, get opcode from embedded sub-header*/
1950  if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1951  cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1952  return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1953  }
1954 
1955  /* For non-embedded mbox command, get opcode from first dma page */
1956  if (unlikely(!mbox->sge_array))
1957  return LPFC_MBOX_SUBSYSTEM_NA;
1958  cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1959  return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1960 }
1961 
/**
 * Extract the opcode field from a SLI4_CONFIG mailbox command,
 * handling both the embedded and non-embedded (first DMA page) forms.
 * Mirrors lpfc_sli_config_mbox_subsys_get() but reads the opcode field.
 *
 * NOTE(review): the definition line (orig. 1973) is missing from this
 * extraction — presumably lpfc_sli_config_mbox_opcode_get(); confirm
 * against the full source.
 *
 * Return: the opcode, or LPFC_MBOX_OPCODE_NA when the command is not
 * SLI4_CONFIG or no SGE array is present.
 */
1972 uint8_t
1974 {
1975  struct lpfc_mbx_sli4_config *sli4_cfg;
1976  union lpfc_sli4_cfg_shdr *cfg_shdr;
1977 
1978  if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1979  return LPFC_MBOX_OPCODE_NA;
1980  sli4_cfg = &mbox->u.mqe.un.sli4_config;
1981 
1982  /* For embedded mbox command, get opcode from embedded sub-header*/
1983  if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1984  cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1985  return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1986  }
1987 
1988  /* For non-embedded mbox command, get opcode from first dma page */
1989  if (unlikely(!mbox->sge_array))
1990  return LPFC_MBOX_OPCODE_NA;
1991  cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1992  return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1993 }
1994 
/**
 * Build a non-embedded READ_FCF_TABLE SLI4_CONFIG mailbox to read the
 * FCF record at @fcf_index, using a single SGE.
 *
 * NOTE(review): this extraction is missing the function-name line
 * (orig. 2007), the `dma_addr_t phys_addr;` declaration (orig. 2012),
 * the trailing lpfc_sli4_config() argument lines (orig. 2026-2027,
 * presumably the READ_FCF_TBL opcode, req_len and LPFC_SLI4_MBX_NEMBED)
 * and the lpfc_printf_log() line (orig. 2030); confirm against the
 * full source.
 *
 * Return: 0 on success, -ENOMEM when no mailbox was supplied or the
 * DMA allocation came up short.
 */
2006 int
2008  struct lpfcMboxq *mboxq,
2009  uint16_t fcf_index)
2010 {
2011  void *virt_addr;
2013  uint8_t *bytep;
2014  struct lpfc_mbx_sge sge;
2015  uint32_t alloc_len, req_len;
2016  struct lpfc_mbx_read_fcf_tbl *read_fcf;
2017 
2018  if (!mboxq)
2019  return -ENOMEM;
2020 
2021  req_len = sizeof(struct fcf_record) +
2022  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
2023 
2024  /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
2025  alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2028 
2029  if (alloc_len < req_len) {
2031  "0291 Allocated DMA memory size (x%x) is "
2032  "less than the requested DMA memory "
2033  "size (x%x)\n", alloc_len, req_len);
2034  return -ENOMEM;
2035  }
2036 
2037  /* Get the first SGE entry from the non-embedded DMA memory. This
2038  * routine only uses a single SGE.
2039  */
2040  lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
2041  phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
2042  virt_addr = mboxq->sge_array->addr[0];
2043  read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
2044 
2045  /* Set up command fields */
2046  bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
2047  /* Perform necessary endian conversion */
2048  bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
2049  lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
2050 
2051  return 0;
2052 }
2053 
2061 void
2062 lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
2063 {
2064  /* Set up SLI4 mailbox command header fields */
2065  memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
2066  bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
2067 
2068  /* Set up host requested features. */
2069  bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
2070  bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
2071 
2072  /* Enable DIF (block guard) only if configured to do so. */
2073  if (phba->cfg_enable_bg)
2074  bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
2075 
2076  /* Enable NPIV only if configured to do so. */
2077  if (phba->max_vpi && phba->cfg_enable_npiv)
2078  bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
2079 
2080  return;
2081 }
2082 
/**
 * Build an INIT_VFI mailbox command for @vport, initializing the VFI
 * with its VR/VT/VP flags set and binding it to the vport's VFI, VPI
 * and the currently registered FCFI.
 *
 * NOTE(review): the definition line (orig. 2095) is missing from this
 * extraction — presumably lpfc_init_vfi(struct lpfcMboxq *mbox,
 * struct lpfc_vport *vport); confirm against the full source.
 */
2094 void
2096 {
2097  struct lpfc_mbx_init_vfi *init_vfi;
2098 
2099  memset(mbox, 0, sizeof(*mbox));
2100  mbox->vport = vport;
2101  init_vfi = &mbox->u.mqe.un.init_vfi;
2102  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
2103  bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2104  bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2105  bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2106  bf_set(lpfc_init_vfi_vfi, init_vfi,
2107  vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2108  bf_set(lpfc_init_vfi_vpi, init_vfi,
2109  vport->phba->vpi_ids[vport->vpi]);
2110  bf_set(lpfc_init_vfi_fcfi, init_vfi,
2111  vport->phba->fcf.fcfi);
2112 }
2113 
/**
 * Build a REG_VFI mailbox command registering @vport's VFI with the
 * fabric: fills in VFI/VPI/FCFI ids, the port WWPN (little-endian
 * words), E_D_TOV/R_A_TOV, and a BDE pointing at the service
 * parameters buffer at DMA address @phys.
 *
 * NOTE(review): the definition line (orig. 2126) and the
 * lpfc_printf_log() prefix line (orig. 2148) are missing from this
 * extraction — presumably lpfc_reg_vfi(struct lpfcMboxq *mbox,
 * struct lpfc_vport *vport, dma_addr_t phys); confirm against the
 * full source.
 */
2125 void
2127 {
2128  struct lpfc_mbx_reg_vfi *reg_vfi;
2129 
2130  memset(mbox, 0, sizeof(*mbox));
2131  reg_vfi = &mbox->u.mqe.un.reg_vfi;
2132  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
2133  bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
2134  bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2135  vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2136  bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
2137  bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
2138  memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
2139  reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
2140  reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
2141  reg_vfi->e_d_tov = vport->phba->fc_edtov;
2142  reg_vfi->r_a_tov = vport->phba->fc_ratov;
2143  reg_vfi->bde.addrHigh = putPaddrHigh(phys);
2144  reg_vfi->bde.addrLow = putPaddrLow(phys);
2145  reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
2146  reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2147  bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
/* NOTE(review): lpfc_printf_log(...) call line lost in extraction */
2149  "3134 Register VFI, mydid:x%x, fcfi:%d, "
2150  " vfi:%d, vpi:%d, fc_pname:%x%x\n",
2151  vport->fc_myDID,
2152  vport->phba->fcf.fcfi,
2153  vport->phba->sli4_hba.vfi_ids[vport->vfi],
2154  vport->phba->vpi_ids[vport->vpi],
2155  reg_vfi->wwn[0], reg_vfi->wwn[1]);
2156 }
2157 
2170 void
2171 lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2172 {
2173  memset(mbox, 0, sizeof(*mbox));
2174  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2175  bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2176  phba->vpi_ids[vpi]);
2177  bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2178  phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2179 }
2180 
/**
 * Build an UNREG_VFI mailbox command that unregisters @vport's VFI.
 *
 * NOTE(review): the definition line (orig. 2193) is missing from this
 * extraction — presumably lpfc_unreg_vfi(struct lpfcMboxq *mbox,
 * struct lpfc_vport *vport); confirm against the full source.
 */
2192 void
2194 {
2195  memset(mbox, 0, sizeof(*mbox));
2196  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2197  bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2198  vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2199 }
2200 
/**
 * Build a mailbox command to dump config region 23 (FCoE parameters)
 * into a freshly allocated DMA buffer, which is saved in
 * mbox->context1 for the completion handler to consume and free.
 *
 * NOTE(review): this extraction is missing the definition line
 * (orig. 2210), the lpfc_printf_log() prefix line (orig. 2225) and
 * the mailbox setup lines orig. 2237 and 2239-2240 (presumably
 * mbxCommand = MBX_DUMP_MEMORY plus the region id / length fields);
 * confirm against the full source.
 *
 * Return: 0 on success, 1 when buffer allocation fails (the partially
 * allocated lpfc_dmabuf is freed before returning).
 */
2209 int
2211 {
2212  struct lpfc_dmabuf *mp = NULL;
2213  MAILBOX_t *mb;
2214 
2215  memset(mbox, 0, sizeof(*mbox));
2216  mb = &mbox->u.mb;
2217 
2218  mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2219  if (mp)
2220  mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2221 
2222  if (!mp || !mp->virt) {
2223  kfree(mp);
2224  /* dump config region 23 failed to allocate memory */
2226  "2569 lpfc dump config region 23: memory"
2227  " allocation failed\n");
2228  return 1;
2229  }
2230 
2231  memset(mp->virt, 0, LPFC_BPL_SIZE);
2232  INIT_LIST_HEAD(&mp->list);
2233 
2234  /* save address for completion */
2235  mbox->context1 = (uint8_t *) mp;
2236 
2238  mb->un.varDmp.type = DMP_NV_PARAMS;
2241  mb->un.varWords[3] = putPaddrLow(mp->phys);
2242  mb->un.varWords[4] = putPaddrHigh(mp->phys);
2243  return 0;
2244 }
2245 
2259 void
2260 lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2261 {
2262  struct lpfc_mbx_reg_fcfi *reg_fcfi;
2263 
2264  memset(mbox, 0, sizeof(*mbox));
2265  reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
2266  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
2267  bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
2268  bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
2269  bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2270  bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2271  bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2272  phba->fcf.current_rec.fcf_indx);
2273  /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
2274  bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
2275  if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2276  bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2277  bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2278  phba->fcf.current_rec.vlan_id);
2279  }
2280 }
2281 
/**
 * Build an UNREG_FCFI mailbox command unregistering FCF index @fcfi.
 *
 * NOTE(review): the definition line (orig. 2291) is missing from this
 * extraction — presumably lpfc_unreg_fcfi(struct lpfcMboxq *mbox,
 * uint16_t fcfi); confirm against the full source.
 */
2290 void
2292 {
2293  memset(mbox, 0, sizeof(*mbox));
2294  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2295  bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2296 }
2297 
/**
 * Build a RESUME_RPI mailbox command for the remote node @ndlp,
 * resuming traffic on its port-assigned RPI with the current fabric
 * event tag.
 *
 * NOTE(review): the definition line (orig. 2307) is missing from this
 * extraction — presumably lpfc_resume_rpi(struct lpfcMboxq *mbox,
 * struct lpfc_nodelist *ndlp); confirm against the full source.
 */
2306 void
2308 {
2309  struct lpfc_hba *phba = ndlp->phba;
2310  struct lpfc_mbx_resume_rpi *resume_rpi;
2311 
2312  memset(mbox, 0, sizeof(*mbox));
2313  resume_rpi = &mbox->u.mqe.un.resume_rpi;
2314  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2315  bf_set(lpfc_resume_rpi_index, resume_rpi,
2316  phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2317  bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2318  resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2319 }
2320 
/**
 * Build a PORT_CAPABILITIES mailbox command requesting the "supported
 * pages" capability page (LPFC_SUPP_PAGES).
 *
 * NOTE(review): the definition line (orig. 2330) is missing from this
 * extraction — presumably lpfc_supported_pages(struct lpfcMboxq *mbox);
 * confirm against the full source.
 */
2329 void
2331 {
2332  struct lpfc_mbx_supp_pages *supp_pages;
2333 
2334  memset(mbox, 0, sizeof(*mbox));
2335  supp_pages = &mbox->u.mqe.un.supp_pages;
2336  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2337  bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2338 }
2339 
/**
 * Build a PORT_CAPABILITIES mailbox command requesting the SLI4
 * parameters capability page (LPFC_SLI4_PARAMETERS). Companion to the
 * supported-pages variant above, differing only in the page number.
 *
 * NOTE(review): the definition line (orig. 2348) is missing from this
 * extraction — presumably lpfc_pc_sli4_params(struct lpfcMboxq *mbox);
 * confirm against the full source.
 */
2347 void
2349 {
2350  struct lpfc_mbx_pc_sli4_params *sli4_params;
2351 
2352  memset(mbox, 0, sizeof(*mbox));
2353  sli4_params = &mbox->u.mqe.un.sli4_params;
2354  bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2355  bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2356 }