Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
lpfc_bsg.c
Go to the documentation of this file.
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for *
3  * Fibre Channel Host Bus Adapters. *
4  * Copyright (C) 2009-2012 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * *
8  * This program is free software; you can redistribute it and/or *
9  * modify it under the terms of version 2 of the GNU General *
10  * Public License as published by the Free Software Foundation. *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID. See the GNU General Public License for *
17  * more details, a copy of which can be found in the file COPYING *
18  * included with this package. *
19  *******************************************************************/
20 
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/list.h>
27 
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include <scsi/fc/fc_fs.h>
33 
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_bsg.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
42 #include "lpfc.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_debugfs.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_version.h"
48 
50  struct list_head node;
51  struct kref kref;
53 
54  /* Event type and waiter identifiers */
58 
59  /* next two flags are here for the auto-delete logic */
60  unsigned long wait_time_stamp;
61  int waiting;
62 
63  /* seen and not seen events */
66 
67  /* job waiting for this event to finish */
69 };
70 
71 struct lpfc_bsg_iocb {
74  struct lpfc_dmabuf *bmp;
76 
77  /* job waiting for this iocb to finish */
79 };
80 
81 struct lpfc_bsg_mbox {
84  struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85  uint8_t *ext; /* extended mailbox data */
86  uint32_t mbOffset; /* from app */
87  uint32_t inExtWLen; /* from app */
88  uint32_t outExtWLen; /* from app */
89 
90  /* job waiting for this mbox command to finish */
92 };
93 
94 #define MENLO_DID 0x0000FC0E
95 
99  struct lpfc_dmabuf *bmp;
100 
101  /* job waiting for this iocb to finish */
103 };
104 
105 #define TYPE_EVT 1
106 #define TYPE_IOCB 2
107 #define TYPE_MBOX 3
108 #define TYPE_MENLO 4
109 struct bsg_job_data {
111  union {
116  } context_un;
117 };
118 
119 struct event_data {
120  struct list_head node;
123  void *data;
125 };
126 
127 #define BUF_SZ_4K 4096
128 #define SLI_CT_ELX_LOOPBACK 0x10
129 
133 };
134 
135 #define ELX_LOOPBACK_HEADER_SZ \
136  (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
137 
139  struct lpfc_dmabuf dma;
142 };
143 
161 static void
162 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
163  struct lpfc_iocbq *cmdiocbq,
164  struct lpfc_iocbq *rspiocbq)
165 {
166  struct bsg_job_data *dd_data;
167  struct fc_bsg_job *job;
168  IOCB_t *rsp;
169  struct lpfc_dmabuf *bmp;
170  struct lpfc_nodelist *ndlp;
171  struct lpfc_bsg_iocb *iocb;
172  unsigned long flags;
173  int rc = 0;
174 
175  spin_lock_irqsave(&phba->ct_ev_lock, flags);
176  dd_data = cmdiocbq->context2;
177  if (!dd_data) {
178  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
179  lpfc_sli_release_iocbq(phba, cmdiocbq);
180  return;
181  }
182 
183  iocb = &dd_data->context_un.iocb;
184  job = iocb->set_job;
185  job->dd_data = NULL; /* so timeout handler does not reply */
186 
187  bmp = iocb->bmp;
188  rsp = &rspiocbq->iocb;
189  ndlp = cmdiocbq->context1;
190 
191  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
192  job->request_payload.sg_cnt, DMA_TO_DEVICE);
193  pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
194  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
195 
196  if (rsp->ulpStatus) {
197  if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
198  switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
200  rc = -ETIMEDOUT;
201  break;
202  case IOERR_INVALID_RPI:
203  rc = -EFAULT;
204  break;
205  default:
206  rc = -EACCES;
207  break;
208  }
209  } else
210  rc = -EACCES;
211  } else
212  job->reply->reply_payload_rcv_len =
213  rsp->un.genreq64.bdl.bdeSize;
214 
215  lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
216  lpfc_sli_release_iocbq(phba, cmdiocbq);
217  lpfc_nlp_put(ndlp);
218  kfree(bmp);
219  kfree(dd_data);
220  /* make error code available to userspace */
221  job->reply->result = rc;
222  /* complete the job back to userspace */
223  job->job_done(job);
224  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
225  return;
226 }
227 
232 static int
233 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
234 {
235  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
236  struct lpfc_hba *phba = vport->phba;
237  struct lpfc_rport_data *rdata = job->rport->dd_data;
238  struct lpfc_nodelist *ndlp = rdata->pnode;
239  struct ulp_bde64 *bpl = NULL;
240  uint32_t timeout;
241  struct lpfc_iocbq *cmdiocbq = NULL;
242  IOCB_t *cmd;
243  struct lpfc_dmabuf *bmp = NULL;
244  int request_nseg;
245  int reply_nseg;
246  struct scatterlist *sgel = NULL;
247  int numbde;
248  dma_addr_t busaddr;
249  struct bsg_job_data *dd_data;
250  uint32_t creg_val;
251  int rc = 0;
252  int iocb_stat;
253 
254  /* in case no data is transferred */
255  job->reply->reply_payload_rcv_len = 0;
256 
257  /* allocate our bsg tracking structure */
258  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
259  if (!dd_data) {
261  "2733 Failed allocation of dd_data\n");
262  rc = -ENOMEM;
263  goto no_dd_data;
264  }
265 
266  if (!lpfc_nlp_get(ndlp)) {
267  rc = -ENODEV;
268  goto no_ndlp;
269  }
270 
271  bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
272  if (!bmp) {
273  rc = -ENOMEM;
274  goto free_ndlp;
275  }
276 
277  if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
278  rc = -ENODEV;
279  goto free_bmp;
280  }
281 
282  cmdiocbq = lpfc_sli_get_iocbq(phba);
283  if (!cmdiocbq) {
284  rc = -ENOMEM;
285  goto free_bmp;
286  }
287 
288  cmd = &cmdiocbq->iocb;
289  bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
290  if (!bmp->virt) {
291  rc = -ENOMEM;
292  goto free_cmdiocbq;
293  }
294 
295  INIT_LIST_HEAD(&bmp->list);
296  bpl = (struct ulp_bde64 *) bmp->virt;
297  request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
298  job->request_payload.sg_cnt, DMA_TO_DEVICE);
299  for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
300  busaddr = sg_dma_address(sgel);
301  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
302  bpl->tus.f.bdeSize = sg_dma_len(sgel);
303  bpl->tus.w = cpu_to_le32(bpl->tus.w);
304  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
305  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
306  bpl++;
307  }
308 
309  reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
310  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
311  for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
312  busaddr = sg_dma_address(sgel);
313  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
314  bpl->tus.f.bdeSize = sg_dma_len(sgel);
315  bpl->tus.w = cpu_to_le32(bpl->tus.w);
316  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
317  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
318  bpl++;
319  }
320 
321  cmd->un.genreq64.bdl.ulpIoTag32 = 0;
322  cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
323  cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
324  cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
325  cmd->un.genreq64.bdl.bdeSize =
326  (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327  cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
328  cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
329  cmd->un.genreq64.w5.hcsw.Dfctl = 0;
330  cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
331  cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
332  cmd->ulpBdeCount = 1;
333  cmd->ulpLe = 1;
334  cmd->ulpClass = CLASS3;
335  cmd->ulpContext = ndlp->nlp_rpi;
336  if (phba->sli_rev == LPFC_SLI_REV4)
337  cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
338  cmd->ulpOwner = OWN_CHIP;
339  cmdiocbq->vport = phba->pport;
340  cmdiocbq->context3 = bmp;
341  cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
342  timeout = phba->fc_ratov * 2;
343  cmd->ulpTimeout = timeout;
344 
345  cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
346  cmdiocbq->context1 = ndlp;
347  cmdiocbq->context2 = dd_data;
348  dd_data->type = TYPE_IOCB;
349  dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
350  dd_data->context_un.iocb.set_job = job;
351  dd_data->context_un.iocb.bmp = bmp;
352 
353  if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
354  if (lpfc_readl(phba->HCregaddr, &creg_val)) {
355  rc = -EIO ;
356  goto free_cmdiocbq;
357  }
358  creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
359  writel(creg_val, phba->HCregaddr);
360  readl(phba->HCregaddr); /* flush */
361  }
362 
363  iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
364  if (iocb_stat == IOCB_SUCCESS)
365  return 0; /* done for now */
366  else if (iocb_stat == IOCB_BUSY)
367  rc = -EAGAIN;
368  else
369  rc = -EIO;
370 
371 
372  /* iocb failed so cleanup */
373  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
374  job->request_payload.sg_cnt, DMA_TO_DEVICE);
375  pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
376  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 
378  lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
379 
380 free_cmdiocbq:
381  lpfc_sli_release_iocbq(phba, cmdiocbq);
382 free_bmp:
383  kfree(bmp);
384 free_ndlp:
385  lpfc_nlp_put(ndlp);
386 no_ndlp:
387  kfree(dd_data);
388 no_dd_data:
389  /* make error code available to userspace */
390  job->reply->result = rc;
391  job->dd_data = NULL;
392  return rc;
393 }
394 
412 static void
413 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
414  struct lpfc_iocbq *cmdiocbq,
415  struct lpfc_iocbq *rspiocbq)
416 {
417  struct bsg_job_data *dd_data;
418  struct fc_bsg_job *job;
419  IOCB_t *rsp;
420  struct lpfc_nodelist *ndlp;
421  struct lpfc_dmabuf *pbuflist = NULL;
422  struct fc_bsg_ctels_reply *els_reply;
423  uint8_t *rjt_data;
424  unsigned long flags;
425  int rc = 0;
426 
427  spin_lock_irqsave(&phba->ct_ev_lock, flags);
428  dd_data = cmdiocbq->context1;
429  /* normal completion and timeout crossed paths, already done */
430  if (!dd_data) {
431  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
432  return;
433  }
434 
435  cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
436  if (cmdiocbq->context2 && rspiocbq)
437  memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
438  &rspiocbq->iocb, sizeof(IOCB_t));
439 
440  job = dd_data->context_un.iocb.set_job;
441  cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
442  rspiocbq = dd_data->context_un.iocb.rspiocbq;
443  rsp = &rspiocbq->iocb;
444  ndlp = dd_data->context_un.iocb.ndlp;
445 
446  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
447  job->request_payload.sg_cnt, DMA_TO_DEVICE);
448  pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
449  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
450 
451  if (job->reply->result == -EAGAIN)
452  rc = -EAGAIN;
453  else if (rsp->ulpStatus == IOSTAT_SUCCESS)
454  job->reply->reply_payload_rcv_len =
455  rsp->un.elsreq64.bdl.bdeSize;
456  else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
457  job->reply->reply_payload_rcv_len =
458  sizeof(struct fc_bsg_ctels_reply);
459  /* LS_RJT data returned in word 4 */
460  rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
461  els_reply = &job->reply->reply_data.ctels_reply;
462  els_reply->status = FC_CTELS_STATUS_REJECT;
463  els_reply->rjt_data.action = rjt_data[3];
464  els_reply->rjt_data.reason_code = rjt_data[2];
465  els_reply->rjt_data.reason_explanation = rjt_data[1];
466  els_reply->rjt_data.vendor_unique = rjt_data[0];
467  } else
468  rc = -EIO;
469 
470  pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
471  lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
472  lpfc_sli_release_iocbq(phba, rspiocbq);
473  lpfc_sli_release_iocbq(phba, cmdiocbq);
474  lpfc_nlp_put(ndlp);
475  kfree(dd_data);
476  /* make error code available to userspace */
477  job->reply->result = rc;
478  job->dd_data = NULL;
479  /* complete the job back to userspace */
480  job->job_done(job);
481  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
482  return;
483 }
484 
489 static int
490 lpfc_bsg_rport_els(struct fc_bsg_job *job)
491 {
492  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
493  struct lpfc_hba *phba = vport->phba;
494  struct lpfc_rport_data *rdata = job->rport->dd_data;
495  struct lpfc_nodelist *ndlp = rdata->pnode;
496  uint32_t elscmd;
497  uint32_t cmdsize;
498  uint32_t rspsize;
499  struct lpfc_iocbq *rspiocbq;
500  struct lpfc_iocbq *cmdiocbq;
501  IOCB_t *rsp;
502  uint16_t rpi = 0;
503  struct lpfc_dmabuf *pcmd;
504  struct lpfc_dmabuf *prsp;
505  struct lpfc_dmabuf *pbuflist = NULL;
506  struct ulp_bde64 *bpl;
507  int request_nseg;
508  int reply_nseg;
509  struct scatterlist *sgel = NULL;
510  int numbde;
511  dma_addr_t busaddr;
512  struct bsg_job_data *dd_data;
513  uint32_t creg_val;
514  int rc = 0;
515 
516  /* in case no data is transferred */
517  job->reply->reply_payload_rcv_len = 0;
518 
519  /* allocate our bsg tracking structure */
520  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
521  if (!dd_data) {
523  "2735 Failed allocation of dd_data\n");
524  rc = -ENOMEM;
525  goto no_dd_data;
526  }
527 
528  if (!lpfc_nlp_get(ndlp)) {
529  rc = -ENODEV;
530  goto free_dd_data;
531  }
532 
533  elscmd = job->request->rqst_data.r_els.els_code;
534  cmdsize = job->request_payload.payload_len;
535  rspsize = job->reply_payload.payload_len;
536  rspiocbq = lpfc_sli_get_iocbq(phba);
537  if (!rspiocbq) {
538  lpfc_nlp_put(ndlp);
539  rc = -ENOMEM;
540  goto free_dd_data;
541  }
542 
543  rsp = &rspiocbq->iocb;
544  rpi = ndlp->nlp_rpi;
545 
546  cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
547  ndlp->nlp_DID, elscmd);
548  if (!cmdiocbq) {
549  rc = -EIO;
550  goto free_rspiocbq;
551  }
552 
553  /* prep els iocb set context1 to the ndlp, context2 to the command
554  * dmabuf, context3 holds the data dmabuf
555  */
556  pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
557  prsp = (struct lpfc_dmabuf *) pcmd->list.next;
558  lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
559  kfree(pcmd);
560  lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
561  kfree(prsp);
562  cmdiocbq->context2 = NULL;
563 
564  pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
565  bpl = (struct ulp_bde64 *) pbuflist->virt;
566 
567  request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
568  job->request_payload.sg_cnt, DMA_TO_DEVICE);
569  for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
570  busaddr = sg_dma_address(sgel);
571  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
572  bpl->tus.f.bdeSize = sg_dma_len(sgel);
573  bpl->tus.w = cpu_to_le32(bpl->tus.w);
574  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
575  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
576  bpl++;
577  }
578 
579  reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
580  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
581  for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
582  busaddr = sg_dma_address(sgel);
583  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
584  bpl->tus.f.bdeSize = sg_dma_len(sgel);
585  bpl->tus.w = cpu_to_le32(bpl->tus.w);
586  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
587  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
588  bpl++;
589  }
590  cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
591  (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
592  if (phba->sli_rev == LPFC_SLI_REV4)
593  cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
594  else
595  cmdiocbq->iocb.ulpContext = rpi;
596  cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
597  cmdiocbq->context1 = NULL;
598  cmdiocbq->context2 = NULL;
599 
600  cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
601  cmdiocbq->context1 = dd_data;
602  cmdiocbq->context_un.ndlp = ndlp;
603  cmdiocbq->context2 = rspiocbq;
604  dd_data->type = TYPE_IOCB;
605  dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
606  dd_data->context_un.iocb.rspiocbq = rspiocbq;
607  dd_data->context_un.iocb.set_job = job;
608  dd_data->context_un.iocb.bmp = NULL;
609  dd_data->context_un.iocb.ndlp = ndlp;
610 
611  if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
612  if (lpfc_readl(phba->HCregaddr, &creg_val)) {
613  rc = -EIO;
614  goto linkdown_err;
615  }
616  creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
617  writel(creg_val, phba->HCregaddr);
618  readl(phba->HCregaddr); /* flush */
619  }
620  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
621  lpfc_nlp_put(ndlp);
622  if (rc == IOCB_SUCCESS)
623  return 0; /* done for now */
624  else if (rc == IOCB_BUSY)
625  rc = -EAGAIN;
626  else
627  rc = -EIO;
628 
629 linkdown_err:
630  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
631  job->request_payload.sg_cnt, DMA_TO_DEVICE);
632  pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
633  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
634 
635  lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
636 
637  lpfc_sli_release_iocbq(phba, cmdiocbq);
638 
639 free_rspiocbq:
640  lpfc_sli_release_iocbq(phba, rspiocbq);
641 
642 free_dd_data:
643  kfree(dd_data);
644 
645 no_dd_data:
646  /* make error code available to userspace */
647  job->reply->result = rc;
648  job->dd_data = NULL;
649  return rc;
650 }
651 
660 static void
661 lpfc_bsg_event_free(struct kref *kref)
662 {
663  struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
664  kref);
665  struct event_data *ed;
666 
667  list_del(&evt->node);
668 
669  while (!list_empty(&evt->events_to_get)) {
670  ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
671  list_del(&ed->node);
672  kfree(ed->data);
673  kfree(ed);
674  }
675 
676  while (!list_empty(&evt->events_to_see)) {
677  ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
678  list_del(&ed->node);
679  kfree(ed->data);
680  kfree(ed);
681  }
682 
683  kfree(evt);
684 }
685 
690 static inline void
691 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
692 {
693  kref_get(&evt->kref);
694 }
695 
700 static inline void
701 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
702 {
703  kref_put(&evt->kref, lpfc_bsg_event_free);
704 }
705 
712 static struct lpfc_bsg_event *
713 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
714 {
715  struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
716 
717  if (!evt)
718  return NULL;
719 
720  INIT_LIST_HEAD(&evt->events_to_get);
721  INIT_LIST_HEAD(&evt->events_to_see);
722  evt->type_mask = ev_mask;
723  evt->req_id = ev_req_id;
724  evt->reg_id = ev_reg_id;
725  evt->wait_time_stamp = jiffies;
726  init_waitqueue_head(&evt->wq);
727  kref_init(&evt->kref);
728  return evt;
729 }
730 
736 static int
737 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
738 {
739  struct lpfc_dmabufext *mlast;
740  struct pci_dev *pcidev;
741  struct list_head head, *curr, *next;
742 
743  if ((!mlist) || (!lpfc_is_link_up(phba) &&
744  (phba->link_flag & LS_LOOPBACK_MODE))) {
745  return 0;
746  }
747 
748  pcidev = phba->pcidev;
749  list_add_tail(&head, &mlist->dma.list);
750 
751  list_for_each_safe(curr, next, &head) {
752  mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
753  if (mlast->dma.virt)
754  dma_free_coherent(&pcidev->dev,
755  mlast->size,
756  mlast->dma.virt,
757  mlast->dma.phys);
758  kfree(mlast);
759  }
760  return 0;
761 }
762 
772 int
773 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
774  struct lpfc_iocbq *piocbq)
775 {
776  uint32_t evt_req_id = 0;
777  uint32_t cmd;
778  uint32_t len;
779  struct lpfc_dmabuf *dmabuf = NULL;
780  struct lpfc_bsg_event *evt;
781  struct event_data *evt_dat = NULL;
782  struct lpfc_iocbq *iocbq;
783  size_t offset = 0;
784  struct list_head head;
785  struct ulp_bde64 *bde;
787  int i;
788  struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
789  struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
790  struct lpfc_hbq_entry *hbqe;
791  struct lpfc_sli_ct_request *ct_req;
792  struct fc_bsg_job *job = NULL;
793  unsigned long flags;
794  int size = 0;
795 
796  INIT_LIST_HEAD(&head);
797  list_add_tail(&head, &piocbq->list);
798 
799  if (piocbq->iocb.ulpBdeCount == 0 ||
800  piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
801  goto error_ct_unsol_exit;
802 
803  if (phba->link_state == LPFC_HBA_ERROR ||
804  (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
805  goto error_ct_unsol_exit;
806 
808  dmabuf = bdeBuf1;
809  else {
810  dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
811  piocbq->iocb.un.cont64[0].addrLow);
812  dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
813  }
814  if (dmabuf == NULL)
815  goto error_ct_unsol_exit;
816  ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
817  evt_req_id = ct_req->FsType;
818  cmd = ct_req->CommandResponse.bits.CmdRsp;
819  len = ct_req->CommandResponse.bits.Size;
821  lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
822 
823  spin_lock_irqsave(&phba->ct_ev_lock, flags);
824  list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
825  if (!(evt->type_mask & FC_REG_CT_EVENT) ||
826  evt->req_id != evt_req_id)
827  continue;
828 
829  lpfc_bsg_event_ref(evt);
830  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
831  evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
832  if (evt_dat == NULL) {
833  spin_lock_irqsave(&phba->ct_ev_lock, flags);
834  lpfc_bsg_event_unref(evt);
836  "2614 Memory allocation failed for "
837  "CT event\n");
838  break;
839  }
840 
841  if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
842  /* take accumulated byte count from the last iocbq */
843  iocbq = list_entry(head.prev, typeof(*iocbq), list);
844  evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
845  } else {
846  list_for_each_entry(iocbq, &head, list) {
847  for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
848  evt_dat->len +=
849  iocbq->iocb.un.cont64[i].tus.f.bdeSize;
850  }
851  }
852 
853  evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
854  if (evt_dat->data == NULL) {
856  "2615 Memory allocation failed for "
857  "CT event data, size %d\n",
858  evt_dat->len);
859  kfree(evt_dat);
860  spin_lock_irqsave(&phba->ct_ev_lock, flags);
861  lpfc_bsg_event_unref(evt);
862  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
863  goto error_ct_unsol_exit;
864  }
865 
866  list_for_each_entry(iocbq, &head, list) {
867  size = 0;
868  if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
869  bdeBuf1 = iocbq->context2;
870  bdeBuf2 = iocbq->context3;
871  }
872  for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
873  if (phba->sli3_options &
875  if (i == 0) {
876  hbqe = (struct lpfc_hbq_entry *)
877  &iocbq->iocb.un.ulpWord[0];
878  size = hbqe->bde.tus.f.bdeSize;
879  dmabuf = bdeBuf1;
880  } else if (i == 1) {
881  hbqe = (struct lpfc_hbq_entry *)
882  &iocbq->iocb.unsli3.
883  sli3Words[4];
884  size = hbqe->bde.tus.f.bdeSize;
885  dmabuf = bdeBuf2;
886  }
887  if ((offset + size) > evt_dat->len)
888  size = evt_dat->len - offset;
889  } else {
890  size = iocbq->iocb.un.cont64[i].
891  tus.f.bdeSize;
892  bde = &iocbq->iocb.un.cont64[i];
893  dma_addr = getPaddr(bde->addrHigh,
894  bde->addrLow);
895  dmabuf = lpfc_sli_ringpostbuf_get(phba,
896  pring, dma_addr);
897  }
898  if (!dmabuf) {
900  LOG_LIBDFC, "2616 No dmabuf "
901  "found for iocbq 0x%p\n",
902  iocbq);
903  kfree(evt_dat->data);
904  kfree(evt_dat);
906  flags);
907  lpfc_bsg_event_unref(evt);
908  spin_unlock_irqrestore(
909  &phba->ct_ev_lock, flags);
910  goto error_ct_unsol_exit;
911  }
912  memcpy((char *)(evt_dat->data) + offset,
913  dmabuf->virt, size);
914  offset += size;
915  if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
916  !(phba->sli3_options &
918  lpfc_sli_ringpostbuf_put(phba, pring,
919  dmabuf);
920  } else {
921  switch (cmd) {
922  case ELX_LOOPBACK_DATA:
923  if (phba->sli_rev <
925  diag_cmd_data_free(phba,
926  (struct lpfc_dmabufext
927  *)dmabuf);
928  break;
930  if ((phba->sli_rev ==
931  LPFC_SLI_REV2) ||
932  (phba->sli3_options &
934  )) {
935  lpfc_in_buf_free(phba,
936  dmabuf);
937  } else {
938  lpfc_post_buffer(phba,
939  pring,
940  1);
941  }
942  break;
943  default:
944  if (!(phba->sli3_options &
946  lpfc_post_buffer(phba,
947  pring,
948  1);
949  break;
950  }
951  }
952  }
953  }
954 
955  spin_lock_irqsave(&phba->ct_ev_lock, flags);
956  if (phba->sli_rev == LPFC_SLI_REV4) {
957  evt_dat->immed_dat = phba->ctx_idx;
958  phba->ctx_idx = (phba->ctx_idx + 1) % 64;
959  /* Provide warning for over-run of the ct_ctx array */
960  if (phba->ct_ctx[evt_dat->immed_dat].flags &
961  UNSOL_VALID)
963  "2717 CT context array entry "
964  "[%d] over-run: oxid:x%x, "
965  "sid:x%x\n", phba->ctx_idx,
966  phba->ct_ctx[
967  evt_dat->immed_dat].oxid,
968  phba->ct_ctx[
969  evt_dat->immed_dat].SID);
970  phba->ct_ctx[evt_dat->immed_dat].rxid =
971  piocbq->iocb.ulpContext;
972  phba->ct_ctx[evt_dat->immed_dat].oxid =
973  piocbq->iocb.unsli3.rcvsli3.ox_id;
974  phba->ct_ctx[evt_dat->immed_dat].SID =
975  piocbq->iocb.un.rcvels.remoteID;
976  phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
977  } else
978  evt_dat->immed_dat = piocbq->iocb.ulpContext;
979 
980  evt_dat->type = FC_REG_CT_EVENT;
981  list_add(&evt_dat->node, &evt->events_to_see);
982  if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
983  wake_up_interruptible(&evt->wq);
984  lpfc_bsg_event_unref(evt);
985  break;
986  }
987 
988  list_move(evt->events_to_see.prev, &evt->events_to_get);
989  lpfc_bsg_event_unref(evt);
990 
991  job = evt->set_job;
992  evt->set_job = NULL;
993  if (job) {
994  job->reply->reply_payload_rcv_len = size;
995  /* make error code available to userspace */
996  job->reply->result = 0;
997  job->dd_data = NULL;
998  /* complete the job back to userspace */
999  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1000  job->job_done(job);
1001  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1002  }
1003  }
1004  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1005 
1006 error_ct_unsol_exit:
1007  if (!list_empty(&head))
1008  list_del(&head);
1009  if ((phba->sli_rev < LPFC_SLI_REV4) &&
1010  (evt_req_id == SLI_CT_ELX_LOOPBACK))
1011  return 0;
1012  return 1;
1013 }
1014 
1019 static int
1020 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1021 {
1022  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1023  struct lpfc_hba *phba = vport->phba;
1024  struct set_ct_event *event_req;
1025  struct lpfc_bsg_event *evt;
1026  int rc = 0;
1027  struct bsg_job_data *dd_data = NULL;
1028  uint32_t ev_mask;
1029  unsigned long flags;
1030 
1031  if (job->request_len <
1032  sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1034  "2612 Received SET_CT_EVENT below minimum "
1035  "size\n");
1036  rc = -EINVAL;
1037  goto job_error;
1038  }
1039 
1040  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1041  if (dd_data == NULL) {
1043  "2734 Failed allocation of dd_data\n");
1044  rc = -ENOMEM;
1045  goto job_error;
1046  }
1047 
1048  event_req = (struct set_ct_event *)
1049  job->request->rqst_data.h_vendor.vendor_cmd;
1050  ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1052  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1053  list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1054  if (evt->reg_id == event_req->ev_reg_id) {
1055  lpfc_bsg_event_ref(evt);
1056  evt->wait_time_stamp = jiffies;
1057  break;
1058  }
1059  }
1060  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1061 
1062  if (&evt->node == &phba->ct_ev_waiters) {
1063  /* no event waiting struct yet - first call */
1064  evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1065  event_req->ev_req_id);
1066  if (!evt) {
1068  "2617 Failed allocation of event "
1069  "waiter\n");
1070  rc = -ENOMEM;
1071  goto job_error;
1072  }
1073 
1074  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1075  list_add(&evt->node, &phba->ct_ev_waiters);
1076  lpfc_bsg_event_ref(evt);
1077  evt->wait_time_stamp = jiffies;
1078  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1079  }
1080 
1081  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1082  evt->waiting = 1;
1083  dd_data->type = TYPE_EVT;
1084  dd_data->context_un.evt = evt;
1085  evt->set_job = job; /* for unsolicited command */
1086  job->dd_data = dd_data; /* for fc transport timeout callback*/
1087  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1088  return 0; /* call job done later */
1089 
1090 job_error:
1091  if (dd_data != NULL)
1092  kfree(dd_data);
1093 
1094  job->dd_data = NULL;
1095  return rc;
1096 }
1097 
1102 static int
1103 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1104 {
1105  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1106  struct lpfc_hba *phba = vport->phba;
1107  struct get_ct_event *event_req;
1108  struct get_ct_event_reply *event_reply;
1109  struct lpfc_bsg_event *evt;
1110  struct event_data *evt_dat = NULL;
1111  unsigned long flags;
1112  uint32_t rc = 0;
1113 
1114  if (job->request_len <
1115  sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1117  "2613 Received GET_CT_EVENT request below "
1118  "minimum size\n");
1119  rc = -EINVAL;
1120  goto job_error;
1121  }
1122 
1123  event_req = (struct get_ct_event *)
1124  job->request->rqst_data.h_vendor.vendor_cmd;
1125 
1126  event_reply = (struct get_ct_event_reply *)
1127  job->reply->reply_data.vendor_reply.vendor_rsp;
1128  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1129  list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1130  if (evt->reg_id == event_req->ev_reg_id) {
1131  if (list_empty(&evt->events_to_get))
1132  break;
1133  lpfc_bsg_event_ref(evt);
1134  evt->wait_time_stamp = jiffies;
1135  evt_dat = list_entry(evt->events_to_get.prev,
1136  struct event_data, node);
1137  list_del(&evt_dat->node);
1138  break;
1139  }
1140  }
1141  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1142 
1143  /* The app may continue to ask for event data until it gets
1144  * an error indicating that there isn't anymore
1145  */
1146  if (evt_dat == NULL) {
1147  job->reply->reply_payload_rcv_len = 0;
1148  rc = -ENOENT;
1149  goto job_error;
1150  }
1151 
1152  if (evt_dat->len > job->request_payload.payload_len) {
1153  evt_dat->len = job->request_payload.payload_len;
1155  "2618 Truncated event data at %d "
1156  "bytes\n",
1157  job->request_payload.payload_len);
1158  }
1159 
1160  event_reply->type = evt_dat->type;
1161  event_reply->immed_data = evt_dat->immed_dat;
1162  if (evt_dat->len > 0)
1163  job->reply->reply_payload_rcv_len =
1164  sg_copy_from_buffer(job->request_payload.sg_list,
1165  job->request_payload.sg_cnt,
1166  evt_dat->data, evt_dat->len);
1167  else
1168  job->reply->reply_payload_rcv_len = 0;
1169 
1170  if (evt_dat) {
1171  kfree(evt_dat->data);
1172  kfree(evt_dat);
1173  }
1174 
1175  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1176  lpfc_bsg_event_unref(evt);
1177  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1178  job->dd_data = NULL;
1179  job->reply->result = 0;
1180  job->job_done(job);
1181  return 0;
1182 
1183 job_error:
1184  job->dd_data = NULL;
1185  job->reply->result = rc;
1186  return rc;
1187 }
1188 
1206 static void
1207 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1208  struct lpfc_iocbq *cmdiocbq,
1209  struct lpfc_iocbq *rspiocbq)
1210 {
1211  struct bsg_job_data *dd_data;
1212  struct fc_bsg_job *job;
1213  IOCB_t *rsp;
1214  struct lpfc_dmabuf *bmp;
1215  struct lpfc_nodelist *ndlp;
1216  unsigned long flags;
1217  int rc = 0;
1218 
1219  spin_lock_irqsave(&phba->ct_ev_lock, flags);
1220  dd_data = cmdiocbq->context2;
1221  /* normal completion and timeout crossed paths, already done */
1222  if (!dd_data) {
1223  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1224  return;
1225  }
1226 
1227  job = dd_data->context_un.iocb.set_job;
1228  bmp = dd_data->context_un.iocb.bmp;
1229  rsp = &rspiocbq->iocb;
1230  ndlp = dd_data->context_un.iocb.ndlp;
1231 
1232  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1233  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1234 
1235  if (rsp->ulpStatus) {
1236  if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1237  switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1239  rc = -ETIMEDOUT;
1240  break;
1241  case IOERR_INVALID_RPI:
1242  rc = -EFAULT;
1243  break;
1244  default:
1245  rc = -EACCES;
1246  break;
1247  }
1248  } else
1249  rc = -EACCES;
1250  } else
1251  job->reply->reply_payload_rcv_len =
1252  rsp->un.genreq64.bdl.bdeSize;
1253 
1254  lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1255  lpfc_sli_release_iocbq(phba, cmdiocbq);
1256  lpfc_nlp_put(ndlp);
1257  kfree(bmp);
1258  kfree(dd_data);
1259  /* make error code available to userspace */
1260  job->reply->result = rc;
1261  job->dd_data = NULL;
1262  /* complete the job back to userspace */
1263  job->job_done(job);
1264  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1265  return;
1266 }
1267 
1276 static int
1277 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1278  struct lpfc_dmabuf *bmp, int num_entry)
1279 {
1280  IOCB_t *icmd;
1281  struct lpfc_iocbq *ctiocb = NULL;
1282  int rc = 0;
1283  struct lpfc_nodelist *ndlp = NULL;
1284  struct bsg_job_data *dd_data;
1285  uint32_t creg_val;
1286 
1287  /* allocate our bsg tracking structure */
1288  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1289  if (!dd_data) {
1291  "2736 Failed allocation of dd_data\n");
1292  rc = -ENOMEM;
1293  goto no_dd_data;
1294  }
1295 
1296  /* Allocate buffer for command iocb */
1297  ctiocb = lpfc_sli_get_iocbq(phba);
1298  if (!ctiocb) {
1299  rc = -ENOMEM;
1300  goto no_ctiocb;
1301  }
1302 
1303  icmd = &ctiocb->iocb;
1304  icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1305  icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1306  icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1308  icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1309  icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1310  icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1311  icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1312  icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1313 
1314  /* Fill in rest of iocb */
1316  icmd->ulpBdeCount = 1;
1317  icmd->ulpLe = 1;
1318  icmd->ulpClass = CLASS3;
1319  if (phba->sli_rev == LPFC_SLI_REV4) {
1320  /* Do not issue unsol response if oxid not marked as valid */
1321  if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1322  rc = IOCB_ERROR;
1323  goto issue_ct_rsp_exit;
1324  }
1325  icmd->ulpContext = phba->ct_ctx[tag].rxid;
1326  icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1327  ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1328  if (!ndlp) {
1330  "2721 ndlp null for oxid %x SID %x\n",
1331  icmd->ulpContext,
1332  phba->ct_ctx[tag].SID);
1333  rc = IOCB_ERROR;
1334  goto issue_ct_rsp_exit;
1335  }
1336 
1337  /* Check if the ndlp is active */
1338  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1339  rc = -IOCB_ERROR;
1340  goto issue_ct_rsp_exit;
1341  }
1342 
1343  /* get a refernece count so the ndlp doesn't go away while
1344  * we respond
1345  */
1346  if (!lpfc_nlp_get(ndlp)) {
1347  rc = -IOCB_ERROR;
1348  goto issue_ct_rsp_exit;
1349  }
1350 
1351  icmd->un.ulpWord[3] =
1352  phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1353 
1354  /* The exchange is done, mark the entry as invalid */
1355  phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1356  } else
1357  icmd->ulpContext = (ushort) tag;
1358 
1359  icmd->ulpTimeout = phba->fc_ratov * 2;
1360 
1361  /* Xmit CT response on exchange <xid> */
1363  "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1364  icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1365 
1366  ctiocb->iocb_cmpl = NULL;
1367  ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1368  ctiocb->vport = phba->pport;
1369  ctiocb->context3 = bmp;
1370 
1371  ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1372  ctiocb->context2 = dd_data;
1373  ctiocb->context1 = ndlp;
1374  dd_data->type = TYPE_IOCB;
1375  dd_data->context_un.iocb.cmdiocbq = ctiocb;
1376  dd_data->context_un.iocb.rspiocbq = NULL;
1377  dd_data->context_un.iocb.set_job = job;
1378  dd_data->context_un.iocb.bmp = bmp;
1379  dd_data->context_un.iocb.ndlp = ndlp;
1380 
1381  if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1382  if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1383  rc = -IOCB_ERROR;
1384  goto issue_ct_rsp_exit;
1385  }
1386  creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1387  writel(creg_val, phba->HCregaddr);
1388  readl(phba->HCregaddr); /* flush */
1389  }
1390 
1391  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1392 
1393  if (rc == IOCB_SUCCESS)
1394  return 0; /* done for now */
1395 
1396 issue_ct_rsp_exit:
1397  lpfc_sli_release_iocbq(phba, ctiocb);
1398 no_ctiocb:
1399  kfree(dd_data);
1400 no_dd_data:
1401  return rc;
1402 }
1403 
1408 static int
1409 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1410 {
1411  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1412  struct lpfc_hba *phba = vport->phba;
1413  struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1414  job->request->rqst_data.h_vendor.vendor_cmd;
1415  struct ulp_bde64 *bpl;
1416  struct lpfc_dmabuf *bmp = NULL;
1417  struct scatterlist *sgel = NULL;
1418  int request_nseg;
1419  int numbde;
1420  dma_addr_t busaddr;
1421  uint32_t tag = mgmt_resp->tag;
1422  unsigned long reqbfrcnt =
1423  (unsigned long)job->request_payload.payload_len;
1424  int rc = 0;
1425 
1426  /* in case no data is transferred */
1427  job->reply->reply_payload_rcv_len = 0;
1428 
1429  if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1430  rc = -ERANGE;
1431  goto send_mgmt_rsp_exit;
1432  }
1433 
1434  bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1435  if (!bmp) {
1436  rc = -ENOMEM;
1437  goto send_mgmt_rsp_exit;
1438  }
1439 
1440  bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1441  if (!bmp->virt) {
1442  rc = -ENOMEM;
1443  goto send_mgmt_rsp_free_bmp;
1444  }
1445 
1446  INIT_LIST_HEAD(&bmp->list);
1447  bpl = (struct ulp_bde64 *) bmp->virt;
1448  request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1449  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1450  for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1451  busaddr = sg_dma_address(sgel);
1452  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1453  bpl->tus.f.bdeSize = sg_dma_len(sgel);
1454  bpl->tus.w = cpu_to_le32(bpl->tus.w);
1455  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1456  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1457  bpl++;
1458  }
1459 
1460  rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1461 
1462  if (rc == IOCB_SUCCESS)
1463  return 0; /* done for now */
1464 
1465  /* TBD need to handle a timeout */
1466  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1467  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1468  rc = -EACCES;
1469  lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1470 
1471 send_mgmt_rsp_free_bmp:
1472  kfree(bmp);
1473 send_mgmt_rsp_exit:
1474  /* make error code available to userspace */
1475  job->reply->result = rc;
1476  job->dd_data = NULL;
1477  return rc;
1478 }
1479 
1487 static int
1488 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1489 {
1490  struct lpfc_vport **vports;
1491  struct Scsi_Host *shost;
1492  struct lpfc_sli *psli;
1493  struct lpfc_sli_ring *pring;
1494  int i = 0;
1495 
1496  psli = &phba->sli;
1497  if (!psli)
1498  return -ENODEV;
1499 
1500  pring = &psli->ring[LPFC_FCP_RING];
1501  if (!pring)
1502  return -ENODEV;
1503 
1504  if ((phba->link_state == LPFC_HBA_ERROR) ||
1505  (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1506  (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1507  return -EACCES;
1508 
1509  vports = lpfc_create_vport_work_array(phba);
1510  if (vports) {
1511  for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1512  shost = lpfc_shost_from_vport(vports[i]);
1513  scsi_block_requests(shost);
1514  }
1515  lpfc_destroy_vport_work_array(phba, vports);
1516  } else {
1517  shost = lpfc_shost_from_vport(phba->pport);
1518  scsi_block_requests(shost);
1519  }
1520 
1521  while (pring->txcmplq_cnt) {
1522  if (i++ > 500) /* wait up to 5 seconds */
1523  break;
1524  msleep(10);
1525  }
1526  return 0;
1527 }
1528 
1536 static void
1537 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1538 {
1539  struct Scsi_Host *shost;
1540  struct lpfc_vport **vports;
1541  int i;
1542 
1543  vports = lpfc_create_vport_work_array(phba);
1544  if (vports) {
1545  for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1546  shost = lpfc_shost_from_vport(vports[i]);
1547  scsi_unblock_requests(shost);
1548  }
1549  lpfc_destroy_vport_work_array(phba, vports);
1550  } else {
1551  shost = lpfc_shost_from_vport(phba->pport);
1552  scsi_unblock_requests(shost);
1553  }
1554  return;
1555 }
1556 
1570 static int
1571 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1572 {
1573  struct diag_mode_set *loopback_mode;
1574  uint32_t link_flags;
1575  uint32_t timeout;
1576  LPFC_MBOXQ_t *pmboxq = NULL;
1577  int mbxstatus = MBX_SUCCESS;
1578  int i = 0;
1579  int rc = 0;
1580 
1581  /* no data to return just the return code */
1582  job->reply->reply_payload_rcv_len = 0;
1583 
1584  if (job->request_len < sizeof(struct fc_bsg_request) +
1585  sizeof(struct diag_mode_set)) {
1587  "2738 Received DIAG MODE request size:%d "
1588  "below the minimum size:%d\n",
1589  job->request_len,
1590  (int)(sizeof(struct fc_bsg_request) +
1591  sizeof(struct diag_mode_set)));
1592  rc = -EINVAL;
1593  goto job_error;
1594  }
1595 
1596  rc = lpfc_bsg_diag_mode_enter(phba);
1597  if (rc)
1598  goto job_error;
1599 
1600  /* bring the link to diagnostic mode */
1601  loopback_mode = (struct diag_mode_set *)
1602  job->request->rqst_data.h_vendor.vendor_cmd;
1603  link_flags = loopback_mode->type;
1604  timeout = loopback_mode->timeout * 100;
1605 
1606  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1607  if (!pmboxq) {
1608  rc = -ENOMEM;
1609  goto loopback_mode_exit;
1610  }
1611  memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1612  pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1613  pmboxq->u.mb.mbxOwner = OWN_HOST;
1614 
1615  mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1616 
1617  if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1618  /* wait for link down before proceeding */
1619  i = 0;
1620  while (phba->link_state != LPFC_LINK_DOWN) {
1621  if (i++ > timeout) {
1622  rc = -ETIMEDOUT;
1623  goto loopback_mode_exit;
1624  }
1625  msleep(10);
1626  }
1627 
1628  memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1629  if (link_flags == INTERNAL_LOOP_BACK)
1631  else
1632  pmboxq->u.mb.un.varInitLnk.link_flags =
1634 
1635  pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1636  pmboxq->u.mb.mbxOwner = OWN_HOST;
1637 
1638  mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1639  LPFC_MBOX_TMO);
1640 
1641  if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1642  rc = -ENODEV;
1643  else {
1644  spin_lock_irq(&phba->hbalock);
1645  phba->link_flag |= LS_LOOPBACK_MODE;
1646  spin_unlock_irq(&phba->hbalock);
1647  /* wait for the link attention interrupt */
1648  msleep(100);
1649 
1650  i = 0;
1651  while (phba->link_state != LPFC_HBA_READY) {
1652  if (i++ > timeout) {
1653  rc = -ETIMEDOUT;
1654  break;
1655  }
1656 
1657  msleep(10);
1658  }
1659  }
1660 
1661  } else
1662  rc = -ENODEV;
1663 
1664 loopback_mode_exit:
1665  lpfc_bsg_diag_mode_exit(phba);
1666 
1667  /*
1668  * Let SLI layer release mboxq if mbox command completed after timeout.
1669  */
1670  if (pmboxq && mbxstatus != MBX_TIMEOUT)
1671  mempool_free(pmboxq, phba->mbox_mem_pool);
1672 
1673 job_error:
1674  /* make error code available to userspace */
1675  job->reply->result = rc;
1676  /* complete the job back to userspace if no error */
1677  if (rc == 0)
1678  job->job_done(job);
1679  return rc;
1680 }
1681 
1690 static int
1691 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1692 {
1693  LPFC_MBOXQ_t *pmboxq;
1694  struct lpfc_mbx_set_link_diag_state *link_diag_state;
1695  uint32_t req_len, alloc_len;
1696  int mbxstatus = MBX_SUCCESS, rc;
1697 
1698  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1699  if (!pmboxq)
1700  return -ENOMEM;
1701 
1702  req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1704  alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1706  req_len, LPFC_SLI4_MBX_EMBED);
1707  if (alloc_len != req_len) {
1708  rc = -ENOMEM;
1709  goto link_diag_state_set_out;
1710  }
1712  "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1713  diag, phba->sli4_hba.lnk_info.lnk_tp,
1714  phba->sli4_hba.lnk_info.lnk_no);
1715 
1716  link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1717  bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1719  bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1720  phba->sli4_hba.lnk_info.lnk_no);
1721  bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1722  phba->sli4_hba.lnk_info.lnk_tp);
1723  if (diag)
1724  bf_set(lpfc_mbx_set_diag_state_diag,
1725  &link_diag_state->u.req, 1);
1726  else
1727  bf_set(lpfc_mbx_set_diag_state_diag,
1728  &link_diag_state->u.req, 0);
1729 
1730  mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1731 
1732  if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1733  rc = 0;
1734  else
1735  rc = -ENODEV;
1736 
1737 link_diag_state_set_out:
1738  if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1739  mempool_free(pmboxq, phba->mbox_mem_pool);
1740 
1741  return rc;
1742 }
1743 
1751 static int
1752 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1753 {
1754  LPFC_MBOXQ_t *pmboxq;
1755  uint32_t req_len, alloc_len;
1756  struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1757  int mbxstatus = MBX_SUCCESS, rc = 0;
1758 
1759  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1760  if (!pmboxq)
1761  return -ENOMEM;
1762  req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1764  alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1766  req_len, LPFC_SLI4_MBX_EMBED);
1767  if (alloc_len != req_len) {
1768  mempool_free(pmboxq, phba->mbox_mem_pool);
1769  return -ENOMEM;
1770  }
1771  link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1772  bf_set(lpfc_mbx_set_diag_state_link_num,
1773  &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
1774  bf_set(lpfc_mbx_set_diag_state_link_type,
1775  &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1776  bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1778 
1779  mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1780  if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1782  "3127 Failed setup loopback mode mailbox "
1783  "command, rc:x%x, status:x%x\n", mbxstatus,
1784  pmboxq->u.mb.mbxStatus);
1785  rc = -ENODEV;
1786  }
1787  if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1788  mempool_free(pmboxq, phba->mbox_mem_pool);
1789  return rc;
1790 }
1791 
1799 static int
1800 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1801 {
1802  int rc;
1803 
1804  if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1806  "3136 Port still had vfi registered: "
1807  "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1808  phba->pport->fc_myDID, phba->fcf.fcfi,
1809  phba->sli4_hba.vfi_ids[phba->pport->vfi],
1810  phba->vpi_ids[phba->pport->vpi]);
1811  return -EINVAL;
1812  }
1813  rc = lpfc_issue_reg_vfi(phba->pport);
1814  return rc;
1815 }
1816 
1825 static int
1826 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1827 {
1828  struct diag_mode_set *loopback_mode;
1829  uint32_t link_flags, timeout;
1830  int i, rc = 0;
1831 
1832  /* no data to return just the return code */
1833  job->reply->reply_payload_rcv_len = 0;
1834 
1835  if (job->request_len < sizeof(struct fc_bsg_request) +
1836  sizeof(struct diag_mode_set)) {
1838  "3011 Received DIAG MODE request size:%d "
1839  "below the minimum size:%d\n",
1840  job->request_len,
1841  (int)(sizeof(struct fc_bsg_request) +
1842  sizeof(struct diag_mode_set)));
1843  rc = -EINVAL;
1844  goto job_error;
1845  }
1846 
1847  rc = lpfc_bsg_diag_mode_enter(phba);
1848  if (rc)
1849  goto job_error;
1850 
1851  /* indicate we are in loobpack diagnostic mode */
1852  spin_lock_irq(&phba->hbalock);
1853  phba->link_flag |= LS_LOOPBACK_MODE;
1854  spin_unlock_irq(&phba->hbalock);
1855 
1856  /* reset port to start frome scratch */
1857  rc = lpfc_selective_reset(phba);
1858  if (rc)
1859  goto job_error;
1860 
1861  /* bring the link to diagnostic mode */
1863  "3129 Bring link to diagnostic state.\n");
1864  loopback_mode = (struct diag_mode_set *)
1865  job->request->rqst_data.h_vendor.vendor_cmd;
1866  link_flags = loopback_mode->type;
1867  timeout = loopback_mode->timeout * 100;
1868 
1869  rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1870  if (rc) {
1872  "3130 Failed to bring link to diagnostic "
1873  "state, rc:x%x\n", rc);
1874  goto loopback_mode_exit;
1875  }
1876 
1877  /* wait for link down before proceeding */
1878  i = 0;
1879  while (phba->link_state != LPFC_LINK_DOWN) {
1880  if (i++ > timeout) {
1881  rc = -ETIMEDOUT;
1883  "3131 Timeout waiting for link to "
1884  "diagnostic mode, timeout:%d ms\n",
1885  timeout * 10);
1886  goto loopback_mode_exit;
1887  }
1888  msleep(10);
1889  }
1890 
1891  /* set up loopback mode */
1893  "3132 Set up loopback mode:x%x\n", link_flags);
1894 
1895  if (link_flags == INTERNAL_LOOP_BACK)
1896  rc = lpfc_sli4_bsg_set_internal_loopback(phba);
1897  else if (link_flags == EXTERNAL_LOOP_BACK)
1900  MBX_NOWAIT);
1901  else {
1902  rc = -EINVAL;
1904  "3141 Loopback mode:x%x not supported\n",
1905  link_flags);
1906  goto loopback_mode_exit;
1907  }
1908 
1909  if (!rc) {
1910  /* wait for the link attention interrupt */
1911  msleep(100);
1912  i = 0;
1913  while (phba->link_state < LPFC_LINK_UP) {
1914  if (i++ > timeout) {
1915  rc = -ETIMEDOUT;
1917  "3137 Timeout waiting for link up "
1918  "in loopback mode, timeout:%d ms\n",
1919  timeout * 10);
1920  break;
1921  }
1922  msleep(10);
1923  }
1924  }
1925 
1926  /* port resource registration setup for loopback diagnostic */
1927  if (!rc) {
1928  /* set up a none zero myDID for loopback test */
1929  phba->pport->fc_myDID = 1;
1930  rc = lpfc_sli4_diag_fcport_reg_setup(phba);
1931  } else
1932  goto loopback_mode_exit;
1933 
1934  if (!rc) {
1935  /* wait for the port ready */
1936  msleep(100);
1937  i = 0;
1938  while (phba->link_state != LPFC_HBA_READY) {
1939  if (i++ > timeout) {
1940  rc = -ETIMEDOUT;
1942  "3133 Timeout waiting for port "
1943  "loopback mode ready, timeout:%d ms\n",
1944  timeout * 10);
1945  break;
1946  }
1947  msleep(10);
1948  }
1949  }
1950 
1951 loopback_mode_exit:
1952  /* clear loopback diagnostic mode */
1953  if (rc) {
1954  spin_lock_irq(&phba->hbalock);
1955  phba->link_flag &= ~LS_LOOPBACK_MODE;
1956  spin_unlock_irq(&phba->hbalock);
1957  }
1958  lpfc_bsg_diag_mode_exit(phba);
1959 
1960 job_error:
1961  /* make error code available to userspace */
1962  job->reply->result = rc;
1963  /* complete the job back to userspace if no error */
1964  if (rc == 0)
1965  job->job_done(job);
1966  return rc;
1967 }
1968 
1976 static int
1977 lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1978 {
1979  struct Scsi_Host *shost;
1980  struct lpfc_vport *vport;
1981  struct lpfc_hba *phba;
1982  int rc;
1983 
1984  shost = job->shost;
1985  if (!shost)
1986  return -ENODEV;
1987  vport = (struct lpfc_vport *)job->shost->hostdata;
1988  if (!vport)
1989  return -ENODEV;
1990  phba = vport->phba;
1991  if (!phba)
1992  return -ENODEV;
1993 
1994  if (phba->sli_rev < LPFC_SLI_REV4)
1995  rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1996  else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1998  rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1999  else
2000  rc = -ENODEV;
2001 
2002  return rc;
2003 }
2004 
2012 static int
2013 lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2014 {
2015  struct Scsi_Host *shost;
2016  struct lpfc_vport *vport;
2017  struct lpfc_hba *phba;
2018  struct diag_mode_set *loopback_mode_end_cmd;
2019  uint32_t timeout;
2020  int rc, i;
2021 
2022  shost = job->shost;
2023  if (!shost)
2024  return -ENODEV;
2025  vport = (struct lpfc_vport *)job->shost->hostdata;
2026  if (!vport)
2027  return -ENODEV;
2028  phba = vport->phba;
2029  if (!phba)
2030  return -ENODEV;
2031 
2032  if (phba->sli_rev < LPFC_SLI_REV4)
2033  return -ENODEV;
2034  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2036  return -ENODEV;
2037 
2038  /* clear loopback diagnostic mode */
2039  spin_lock_irq(&phba->hbalock);
2040  phba->link_flag &= ~LS_LOOPBACK_MODE;
2041  spin_unlock_irq(&phba->hbalock);
2042  loopback_mode_end_cmd = (struct diag_mode_set *)
2043  job->request->rqst_data.h_vendor.vendor_cmd;
2044  timeout = loopback_mode_end_cmd->timeout * 100;
2045 
2046  rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2047  if (rc) {
2049  "3139 Failed to bring link to diagnostic "
2050  "state, rc:x%x\n", rc);
2051  goto loopback_mode_end_exit;
2052  }
2053 
2054  /* wait for link down before proceeding */
2055  i = 0;
2056  while (phba->link_state != LPFC_LINK_DOWN) {
2057  if (i++ > timeout) {
2058  rc = -ETIMEDOUT;
2060  "3140 Timeout waiting for link to "
2061  "diagnostic mode_end, timeout:%d ms\n",
2062  timeout * 10);
2063  /* there is nothing much we can do here */
2064  break;
2065  }
2066  msleep(10);
2067  }
2068 
2069  /* reset port resource registrations */
2070  rc = lpfc_selective_reset(phba);
2071  phba->pport->fc_myDID = 0;
2072 
2073 loopback_mode_end_exit:
2074  /* make return code available to userspace */
2075  job->reply->result = rc;
2076  /* complete the job back to userspace if no error */
2077  if (rc == 0)
2078  job->job_done(job);
2079  return rc;
2080 }
2081 
2089 static int
2090 lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2091 {
2092  struct Scsi_Host *shost;
2093  struct lpfc_vport *vport;
2094  struct lpfc_hba *phba;
2095  LPFC_MBOXQ_t *pmboxq;
2096  struct sli4_link_diag *link_diag_test_cmd;
2097  uint32_t req_len, alloc_len;
2098  uint32_t timeout;
2099  struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2100  union lpfc_sli4_cfg_shdr *shdr;
2101  uint32_t shdr_status, shdr_add_status;
2102  struct diag_status *diag_status_reply;
2103  int mbxstatus, rc = 0;
2104 
2105  shost = job->shost;
2106  if (!shost) {
2107  rc = -ENODEV;
2108  goto job_error;
2109  }
2110  vport = (struct lpfc_vport *)job->shost->hostdata;
2111  if (!vport) {
2112  rc = -ENODEV;
2113  goto job_error;
2114  }
2115  phba = vport->phba;
2116  if (!phba) {
2117  rc = -ENODEV;
2118  goto job_error;
2119  }
2120 
2121  if (phba->sli_rev < LPFC_SLI_REV4) {
2122  rc = -ENODEV;
2123  goto job_error;
2124  }
2125  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2127  rc = -ENODEV;
2128  goto job_error;
2129  }
2130 
2131  if (job->request_len < sizeof(struct fc_bsg_request) +
2132  sizeof(struct sli4_link_diag)) {
2134  "3013 Received LINK DIAG TEST request "
2135  " size:%d below the minimum size:%d\n",
2136  job->request_len,
2137  (int)(sizeof(struct fc_bsg_request) +
2138  sizeof(struct sli4_link_diag)));
2139  rc = -EINVAL;
2140  goto job_error;
2141  }
2142 
2143  rc = lpfc_bsg_diag_mode_enter(phba);
2144  if (rc)
2145  goto job_error;
2146 
2147  link_diag_test_cmd = (struct sli4_link_diag *)
2148  job->request->rqst_data.h_vendor.vendor_cmd;
2149  timeout = link_diag_test_cmd->timeout * 100;
2150 
2151  rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2152 
2153  if (rc)
2154  goto job_error;
2155 
2156  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2157  if (!pmboxq) {
2158  rc = -ENOMEM;
2159  goto link_diag_test_exit;
2160  }
2161 
2162  req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2164  alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2166  req_len, LPFC_SLI4_MBX_EMBED);
2167  if (alloc_len != req_len) {
2168  rc = -ENOMEM;
2169  goto link_diag_test_exit;
2170  }
2171  run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2172  bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2173  phba->sli4_hba.lnk_info.lnk_no);
2174  bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2175  phba->sli4_hba.lnk_info.lnk_tp);
2176  bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2177  link_diag_test_cmd->test_id);
2178  bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2179  link_diag_test_cmd->loops);
2180  bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2181  link_diag_test_cmd->test_version);
2182  bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2183  link_diag_test_cmd->error_action);
2184 
2185  mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2186 
2187  shdr = (union lpfc_sli4_cfg_shdr *)
2188  &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2189  shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2190  shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2191  if (shdr_status || shdr_add_status || mbxstatus) {
2193  "3010 Run link diag test mailbox failed with "
2194  "mbx_status x%x status x%x, add_status x%x\n",
2195  mbxstatus, shdr_status, shdr_add_status);
2196  }
2197 
2198  diag_status_reply = (struct diag_status *)
2199  job->reply->reply_data.vendor_reply.vendor_rsp;
2200 
2201  if (job->reply_len <
2202  sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2204  "3012 Received Run link diag test reply "
2205  "below minimum size (%d): reply_len:%d\n",
2206  (int)(sizeof(struct fc_bsg_request) +
2207  sizeof(struct diag_status)),
2208  job->reply_len);
2209  rc = -EINVAL;
2210  goto job_error;
2211  }
2212 
2213  diag_status_reply->mbox_status = mbxstatus;
2214  diag_status_reply->shdr_status = shdr_status;
2215  diag_status_reply->shdr_add_status = shdr_add_status;
2216 
2217 link_diag_test_exit:
2218  rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2219 
2220  if (pmboxq)
2221  mempool_free(pmboxq, phba->mbox_mem_pool);
2222 
2223  lpfc_bsg_diag_mode_exit(phba);
2224 
2225 job_error:
2226  /* make error code available to userspace */
2227  job->reply->result = rc;
2228  /* complete the job back to userspace if no error */
2229  if (rc == 0)
2230  job->job_done(job);
2231  return rc;
2232 }
2233 
2242 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2243 {
2244  LPFC_MBOXQ_t *mbox;
2245  struct lpfc_dmabuf *dmabuff;
2246  int status;
2247 
2248  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2249  if (!mbox)
2250  return -ENOMEM;
2251 
2252  if (phba->sli_rev < LPFC_SLI_REV4)
2253  status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2254  (uint8_t *)&phba->pport->fc_sparam,
2255  mbox, *rpi);
2256  else {
2257  *rpi = lpfc_sli4_alloc_rpi(phba);
2258  status = lpfc_reg_rpi(phba, phba->pport->vpi,
2259  phba->pport->fc_myDID,
2260  (uint8_t *)&phba->pport->fc_sparam,
2261  mbox, *rpi);
2262  }
2263 
2264  if (status) {
2265  mempool_free(mbox, phba->mbox_mem_pool);
2266  if (phba->sli_rev == LPFC_SLI_REV4)
2267  lpfc_sli4_free_rpi(phba, *rpi);
2268  return -ENOMEM;
2269  }
2270 
2271  dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2272  mbox->context1 = NULL;
2273  mbox->context2 = NULL;
2274  status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2275 
2276  if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2277  lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2278  kfree(dmabuff);
2279  if (status != MBX_TIMEOUT)
2280  mempool_free(mbox, phba->mbox_mem_pool);
2281  if (phba->sli_rev == LPFC_SLI_REV4)
2282  lpfc_sli4_free_rpi(phba, *rpi);
2283  return -ENODEV;
2284  }
2285 
2286  if (phba->sli_rev < LPFC_SLI_REV4)
2287  *rpi = mbox->u.mb.un.varWords[0];
2288 
2289  lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2290  kfree(dmabuff);
2291  mempool_free(mbox, phba->mbox_mem_pool);
2292  return 0;
2293 }
2294 
2302 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2303 {
2304  LPFC_MBOXQ_t *mbox;
2305  int status;
2306 
2307  /* Allocate mboxq structure */
2308  mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2309  if (mbox == NULL)
2310  return -ENOMEM;
2311 
2312  if (phba->sli_rev < LPFC_SLI_REV4)
2313  lpfc_unreg_login(phba, 0, rpi, mbox);
2314  else
2315  lpfc_unreg_login(phba, phba->pport->vpi,
2316  phba->sli4_hba.rpi_ids[rpi], mbox);
2317 
2318  status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2319 
2320  if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2321  if (status != MBX_TIMEOUT)
2322  mempool_free(mbox, phba->mbox_mem_pool);
2323  return -EIO;
2324  }
2325  mempool_free(mbox, phba->mbox_mem_pool);
2326  if (phba->sli_rev == LPFC_SLI_REV4)
2327  lpfc_sli4_free_rpi(phba, rpi);
2328  return 0;
2329 }
2330 
2343 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2344  uint16_t *txxri, uint16_t * rxxri)
2345 {
2346  struct lpfc_bsg_event *evt;
2347  struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2348  IOCB_t *cmd, *rsp;
2349  struct lpfc_dmabuf *dmabuf;
2350  struct ulp_bde64 *bpl = NULL;
2351  struct lpfc_sli_ct_request *ctreq = NULL;
2352  int ret_val = 0;
2353  int time_left;
2354  int iocb_stat = 0;
2355  unsigned long flags;
2356 
2357  *txxri = 0;
2358  *rxxri = 0;
2359  evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2361  if (!evt)
2362  return -ENOMEM;
2363 
2364  spin_lock_irqsave(&phba->ct_ev_lock, flags);
2365  list_add(&evt->node, &phba->ct_ev_waiters);
2366  lpfc_bsg_event_ref(evt);
2367  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2368 
2369  cmdiocbq = lpfc_sli_get_iocbq(phba);
2370  rspiocbq = lpfc_sli_get_iocbq(phba);
2371 
2372  dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2373  if (dmabuf) {
2374  dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2375  if (dmabuf->virt) {
2376  INIT_LIST_HEAD(&dmabuf->list);
2377  bpl = (struct ulp_bde64 *) dmabuf->virt;
2378  memset(bpl, 0, sizeof(*bpl));
2379  ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2380  bpl->addrHigh =
2381  le32_to_cpu(putPaddrHigh(dmabuf->phys +
2382  sizeof(*bpl)));
2383  bpl->addrLow =
2384  le32_to_cpu(putPaddrLow(dmabuf->phys +
2385  sizeof(*bpl)));
2386  bpl->tus.f.bdeFlags = 0;
2387  bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2388  bpl->tus.w = le32_to_cpu(bpl->tus.w);
2389  }
2390  }
2391 
2392  if (cmdiocbq == NULL || rspiocbq == NULL ||
2393  dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2394  dmabuf->virt == NULL) {
2395  ret_val = -ENOMEM;
2396  goto err_get_xri_exit;
2397  }
2398 
2399  cmd = &cmdiocbq->iocb;
2400  rsp = &rspiocbq->iocb;
2401 
2402  memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2403 
2404  ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2405  ctreq->RevisionId.bits.InId = 0;
2406  ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2407  ctreq->FsSubType = 0;
2409  ctreq->CommandResponse.bits.Size = 0;
2410 
2411 
2412  cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2413  cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2415  cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2416 
2417  cmd->un.xseq64.w5.hcsw.Fctl = LA;
2418  cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2419  cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2420  cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2421 
2423  cmd->ulpBdeCount = 1;
2424  cmd->ulpLe = 1;
2425  cmd->ulpClass = CLASS3;
2426  cmd->ulpContext = rpi;
2427 
2428  cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2429  cmdiocbq->vport = phba->pport;
2430 
2431  iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2432  rspiocbq,
2433  (phba->fc_ratov * 2)
2434  + LPFC_DRVR_TIMEOUT);
2435  if (iocb_stat) {
2436  ret_val = -EIO;
2437  goto err_get_xri_exit;
2438  }
2439  *txxri = rsp->ulpContext;
2440 
2441  evt->waiting = 1;
2442  evt->wait_time_stamp = jiffies;
2444  evt->wq, !list_empty(&evt->events_to_see),
2445  ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2446  if (list_empty(&evt->events_to_see))
2447  ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2448  else {
2449  spin_lock_irqsave(&phba->ct_ev_lock, flags);
2450  list_move(evt->events_to_see.prev, &evt->events_to_get);
2451  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2452  *rxxri = (list_entry(evt->events_to_get.prev,
2453  typeof(struct event_data),
2454  node))->immed_dat;
2455  }
2456  evt->waiting = 0;
2457 
2458 err_get_xri_exit:
2459  spin_lock_irqsave(&phba->ct_ev_lock, flags);
2460  lpfc_bsg_event_unref(evt); /* release ref */
2461  lpfc_bsg_event_unref(evt); /* delete */
2462  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2463 
2464  if (dmabuf) {
2465  if (dmabuf->virt)
2466  lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2467  kfree(dmabuf);
2468  }
2469 
2470  if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2471  lpfc_sli_release_iocbq(phba, cmdiocbq);
2472  if (rspiocbq)
2473  lpfc_sli_release_iocbq(phba, rspiocbq);
2474  return ret_val;
2475 }
2476 
2484 static struct lpfc_dmabuf *
2485 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2486 {
2487  struct lpfc_dmabuf *dmabuf;
2488  struct pci_dev *pcidev = phba->pcidev;
2489 
2490  /* allocate dma buffer struct */
2491  dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2492  if (!dmabuf)
2493  return NULL;
2494 
2495  INIT_LIST_HEAD(&dmabuf->list);
2496 
2497  /* now, allocate dma buffer */
2498  dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2499  &(dmabuf->phys), GFP_KERNEL);
2500 
2501  if (!dmabuf->virt) {
2502  kfree(dmabuf);
2503  return NULL;
2504  }
2505  memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2506 
2507  return dmabuf;
2508 }
2509 
2518 static void
2519 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2520 {
2521  struct pci_dev *pcidev = phba->pcidev;
2522 
2523  if (!dmabuf)
2524  return;
2525 
2526  if (dmabuf->virt)
2528  dmabuf->virt, dmabuf->phys);
2529  kfree(dmabuf);
2530  return;
2531 }
2532 
2541 static void
2542 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2543  struct list_head *dmabuf_list)
2544 {
2545  struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2546 
2547  if (list_empty(dmabuf_list))
2548  return;
2549 
2550  list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2551  list_del_init(&dmabuf->list);
2552  lpfc_bsg_dma_page_free(phba, dmabuf);
2553  }
2554  return;
2555 }
2556 
2568 static struct lpfc_dmabufext *
2569 diag_cmd_data_alloc(struct lpfc_hba *phba,
2570  struct ulp_bde64 *bpl, uint32_t size,
2571  int nocopydata)
2572 {
2573  struct lpfc_dmabufext *mlist = NULL;
2574  struct lpfc_dmabufext *dmp;
2575  int cnt, offset = 0, i = 0;
2576  struct pci_dev *pcidev;
2577 
2578  pcidev = phba->pcidev;
2579 
2580  while (size) {
2581  /* We get chunks of 4K */
2582  if (size > BUF_SZ_4K)
2583  cnt = BUF_SZ_4K;
2584  else
2585  cnt = size;
2586 
2587  /* allocate struct lpfc_dmabufext buffer header */
2588  dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2589  if (!dmp)
2590  goto out;
2591 
2592  INIT_LIST_HEAD(&dmp->dma.list);
2593 
2594  /* Queue it to a linked list */
2595  if (mlist)
2596  list_add_tail(&dmp->dma.list, &mlist->dma.list);
2597  else
2598  mlist = dmp;
2599 
2600  /* allocate buffer */
2601  dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2602  cnt,
2603  &(dmp->dma.phys),
2604  GFP_KERNEL);
2605 
2606  if (!dmp->dma.virt)
2607  goto out;
2608 
2609  dmp->size = cnt;
2610 
2611  if (nocopydata) {
2612  bpl->tus.f.bdeFlags = 0;
2613  pci_dma_sync_single_for_device(phba->pcidev,
2614  dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2615 
2616  } else {
2617  memset((uint8_t *)dmp->dma.virt, 0, cnt);
2618  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2619  }
2620 
2621  /* build buffer ptr list for IOCB */
2622  bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2623  bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2624  bpl->tus.f.bdeSize = (ushort) cnt;
2625  bpl->tus.w = le32_to_cpu(bpl->tus.w);
2626  bpl++;
2627 
2628  i++;
2629  offset += cnt;
2630  size -= cnt;
2631  }
2632 
2633  mlist->flag = i;
2634  return mlist;
2635 out:
2636  diag_cmd_data_free(phba, mlist);
2637  return NULL;
2638 }
2639 
2649 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2650  size_t len)
2651 {
2652  struct lpfc_sli *psli = &phba->sli;
2653  struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2654  struct lpfc_iocbq *cmdiocbq;
2655  IOCB_t *cmd = NULL;
2656  struct list_head head, *curr, *next;
2657  struct lpfc_dmabuf *rxbmp;
2658  struct lpfc_dmabuf *dmp;
2659  struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2660  struct ulp_bde64 *rxbpl = NULL;
2661  uint32_t num_bde;
2662  struct lpfc_dmabufext *rxbuffer = NULL;
2663  int ret_val = 0;
2664  int iocb_stat;
2665  int i = 0;
2666 
2667  cmdiocbq = lpfc_sli_get_iocbq(phba);
2668  rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2669  if (rxbmp != NULL) {
2670  rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2671  if (rxbmp->virt) {
2672  INIT_LIST_HEAD(&rxbmp->list);
2673  rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2674  rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2675  }
2676  }
2677 
2678  if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2679  ret_val = -ENOMEM;
2680  goto err_post_rxbufs_exit;
2681  }
2682 
2683  /* Queue buffers for the receive exchange */
2684  num_bde = (uint32_t)rxbuffer->flag;
2685  dmp = &rxbuffer->dma;
2686 
2687  cmd = &cmdiocbq->iocb;
2688  i = 0;
2689 
2690  INIT_LIST_HEAD(&head);
2691  list_add_tail(&head, &dmp->list);
2692  list_for_each_safe(curr, next, &head) {
2693  mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2694  list_del(curr);
2695 
2696  if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2697  mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2698  cmd->un.quexri64cx.buff.bde.addrHigh =
2699  putPaddrHigh(mp[i]->phys);
2700  cmd->un.quexri64cx.buff.bde.addrLow =
2701  putPaddrLow(mp[i]->phys);
2702  cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2703  ((struct lpfc_dmabufext *)mp[i])->size;
2704  cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2706  cmd->ulpPU = 0;
2707  cmd->ulpLe = 1;
2708  cmd->ulpBdeCount = 1;
2709  cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2710 
2711  } else {
2712  cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2713  cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2714  cmd->un.cont64[i].tus.f.bdeSize =
2715  ((struct lpfc_dmabufext *)mp[i])->size;
2716  cmd->ulpBdeCount = ++i;
2717 
2718  if ((--num_bde > 0) && (i < 2))
2719  continue;
2720 
2722  cmd->ulpLe = 1;
2723  }
2724 
2725  cmd->ulpClass = CLASS3;
2726  cmd->ulpContext = rxxri;
2727 
2728  iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2729  0);
2730  if (iocb_stat == IOCB_ERROR) {
2731  diag_cmd_data_free(phba,
2732  (struct lpfc_dmabufext *)mp[0]);
2733  if (mp[1])
2734  diag_cmd_data_free(phba,
2735  (struct lpfc_dmabufext *)mp[1]);
2736  dmp = list_entry(next, struct lpfc_dmabuf, list);
2737  ret_val = -EIO;
2738  goto err_post_rxbufs_exit;
2739  }
2740 
2741  lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2742  if (mp[1]) {
2743  lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2744  mp[1] = NULL;
2745  }
2746 
2747  /* The iocb was freed by lpfc_sli_issue_iocb */
2748  cmdiocbq = lpfc_sli_get_iocbq(phba);
2749  if (!cmdiocbq) {
2750  dmp = list_entry(next, struct lpfc_dmabuf, list);
2751  ret_val = -EIO;
2752  goto err_post_rxbufs_exit;
2753  }
2754 
2755  cmd = &cmdiocbq->iocb;
2756  i = 0;
2757  }
2758  list_del(&head);
2759 
2760 err_post_rxbufs_exit:
2761 
2762  if (rxbmp) {
2763  if (rxbmp->virt)
2764  lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2765  kfree(rxbmp);
2766  }
2767 
2768  if (cmdiocbq)
2769  lpfc_sli_release_iocbq(phba, cmdiocbq);
2770  return ret_val;
2771 }
2772 
2792 static int
2793 lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2794 {
2795  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2796  struct lpfc_hba *phba = vport->phba;
2797  struct diag_mode_test *diag_mode;
2798  struct lpfc_bsg_event *evt;
2799  struct event_data *evdat;
2800  struct lpfc_sli *psli = &phba->sli;
2801  uint32_t size;
2802  uint32_t full_size;
2803  size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2804  uint16_t rpi = 0;
2805  struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
2806  IOCB_t *cmd, *rsp = NULL;
2807  struct lpfc_sli_ct_request *ctreq;
2808  struct lpfc_dmabuf *txbmp;
2809  struct ulp_bde64 *txbpl = NULL;
2810  struct lpfc_dmabufext *txbuffer = NULL;
2811  struct list_head head;
2812  struct lpfc_dmabuf *curr;
2813  uint16_t txxri = 0, rxxri;
2814  uint32_t num_bde;
2815  uint8_t *ptr = NULL, *rx_databuf = NULL;
2816  int rc = 0;
2817  int time_left;
2818  int iocb_stat;
2819  unsigned long flags;
2820  void *dataout = NULL;
2821  uint32_t total_mem;
2822 
2823  /* in case no data is returned return just the return code */
2824  job->reply->reply_payload_rcv_len = 0;
2825 
2826  if (job->request_len <
2827  sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2829  "2739 Received DIAG TEST request below minimum "
2830  "size\n");
2831  rc = -EINVAL;
2832  goto loopback_test_exit;
2833  }
2834 
2835  if (job->request_payload.payload_len !=
2836  job->reply_payload.payload_len) {
2837  rc = -EINVAL;
2838  goto loopback_test_exit;
2839  }
2840  diag_mode = (struct diag_mode_test *)
2841  job->request->rqst_data.h_vendor.vendor_cmd;
2842 
2843  if ((phba->link_state == LPFC_HBA_ERROR) ||
2844  (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2845  (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2846  rc = -EACCES;
2847  goto loopback_test_exit;
2848  }
2849 
2850  if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2851  rc = -EACCES;
2852  goto loopback_test_exit;
2853  }
2854 
2855  size = job->request_payload.payload_len;
2856  full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2857 
2858  if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2859  rc = -ERANGE;
2860  goto loopback_test_exit;
2861  }
2862 
2863  if (full_size >= BUF_SZ_4K) {
2864  /*
2865  * Allocate memory for ioctl data. If buffer is bigger than 64k,
2866  * then we allocate 64k and re-use that buffer over and over to
2867  * xfer the whole block. This is because Linux kernel has a
2868  * problem allocating more than 120k of kernel space memory. Saw
2869  * problem with GET_FCPTARGETMAPPING...
2870  */
2871  if (size <= (64 * 1024))
2872  total_mem = full_size;
2873  else
2874  total_mem = 64 * 1024;
2875  } else
2876  /* Allocate memory for ioctl data */
2877  total_mem = BUF_SZ_4K;
2878 
2879  dataout = kmalloc(total_mem, GFP_KERNEL);
2880  if (dataout == NULL) {
2881  rc = -ENOMEM;
2882  goto loopback_test_exit;
2883  }
2884 
2885  ptr = dataout;
2887  sg_copy_to_buffer(job->request_payload.sg_list,
2888  job->request_payload.sg_cnt,
2889  ptr, size);
2890  rc = lpfcdiag_loop_self_reg(phba, &rpi);
2891  if (rc)
2892  goto loopback_test_exit;
2893 
2894  if (phba->sli_rev < LPFC_SLI_REV4) {
2895  rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2896  if (rc) {
2897  lpfcdiag_loop_self_unreg(phba, rpi);
2898  goto loopback_test_exit;
2899  }
2900 
2901  rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2902  if (rc) {
2903  lpfcdiag_loop_self_unreg(phba, rpi);
2904  goto loopback_test_exit;
2905  }
2906  }
2907  evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2909  if (!evt) {
2910  lpfcdiag_loop_self_unreg(phba, rpi);
2911  rc = -ENOMEM;
2912  goto loopback_test_exit;
2913  }
2914 
2915  spin_lock_irqsave(&phba->ct_ev_lock, flags);
2916  list_add(&evt->node, &phba->ct_ev_waiters);
2917  lpfc_bsg_event_ref(evt);
2918  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2919 
2920  cmdiocbq = lpfc_sli_get_iocbq(phba);
2921  if (phba->sli_rev < LPFC_SLI_REV4)
2922  rspiocbq = lpfc_sli_get_iocbq(phba);
2923  txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2924 
2925  if (txbmp) {
2926  txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2927  if (txbmp->virt) {
2928  INIT_LIST_HEAD(&txbmp->list);
2929  txbpl = (struct ulp_bde64 *) txbmp->virt;
2930  txbuffer = diag_cmd_data_alloc(phba,
2931  txbpl, full_size, 0);
2932  }
2933  }
2934 
2935  if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
2936  rc = -ENOMEM;
2937  goto err_loopback_test_exit;
2938  }
2939  if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
2940  rc = -ENOMEM;
2941  goto err_loopback_test_exit;
2942  }
2943 
2944  cmd = &cmdiocbq->iocb;
2945  if (phba->sli_rev < LPFC_SLI_REV4)
2946  rsp = &rspiocbq->iocb;
2947 
2948  INIT_LIST_HEAD(&head);
2949  list_add_tail(&head, &txbuffer->dma.list);
2950  list_for_each_entry(curr, &head, list) {
2951  segment_len = ((struct lpfc_dmabufext *)curr)->size;
2952  if (current_offset == 0) {
2953  ctreq = curr->virt;
2954  memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2955  ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2956  ctreq->RevisionId.bits.InId = 0;
2957  ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2958  ctreq->FsSubType = 0;
2959  ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2960  ctreq->CommandResponse.bits.Size = size;
2961  segment_offset = ELX_LOOPBACK_HEADER_SZ;
2962  } else
2963  segment_offset = 0;
2964 
2965  BUG_ON(segment_offset >= segment_len);
2966  memcpy(curr->virt + segment_offset,
2967  ptr + current_offset,
2968  segment_len - segment_offset);
2969 
2970  current_offset += segment_len - segment_offset;
2971  BUG_ON(current_offset > size);
2972  }
2973  list_del(&head);
2974 
2975  /* Build the XMIT_SEQUENCE iocb */
2976  num_bde = (uint32_t)txbuffer->flag;
2977 
2978  cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2979  cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2980  cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2981  cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2982 
2983  cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2984  cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2985  cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2986  cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2987 
2988  cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2989  cmd->ulpBdeCount = 1;
2990  cmd->ulpLe = 1;
2991  cmd->ulpClass = CLASS3;
2992 
2993  if (phba->sli_rev < LPFC_SLI_REV4) {
2994  cmd->ulpContext = txxri;
2995  } else {
2996  cmd->un.xseq64.bdl.ulpIoTag32 = 0;
2997  cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
2998  cmdiocbq->context3 = txbmp;
2999  cmdiocbq->sli4_xritag = NO_XRI;
3000  cmd->unsli3.rcvsli3.ox_id = 0xffff;
3001  }
3002  cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3003  cmdiocbq->vport = phba->pport;
3004  iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3005  rspiocbq, (phba->fc_ratov * 2) +
3007 
3008  if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
3009  (rsp->ulpStatus != IOCB_SUCCESS))) {
3011  "3126 Failed loopback test issue iocb: "
3012  "iocb_stat:x%x\n", iocb_stat);
3013  rc = -EIO;
3014  goto err_loopback_test_exit;
3015  }
3016 
3017  evt->waiting = 1;
3019  evt->wq, !list_empty(&evt->events_to_see),
3020  ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
3021  evt->waiting = 0;
3022  if (list_empty(&evt->events_to_see)) {
3023  rc = (time_left) ? -EINTR : -ETIMEDOUT;
3025  "3125 Not receiving unsolicited event, "
3026  "rc:x%x\n", rc);
3027  } else {
3028  spin_lock_irqsave(&phba->ct_ev_lock, flags);
3029  list_move(evt->events_to_see.prev, &evt->events_to_get);
3030  evdat = list_entry(evt->events_to_get.prev,
3031  typeof(*evdat), node);
3032  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3033  rx_databuf = evdat->data;
3034  if (evdat->len != full_size) {
3036  "1603 Loopback test did not receive expected "
3037  "data length. actual length 0x%x expected "
3038  "length 0x%x\n",
3039  evdat->len, full_size);
3040  rc = -EIO;
3041  } else if (rx_databuf == NULL)
3042  rc = -EIO;
3043  else {
3044  rc = IOCB_SUCCESS;
3045  /* skip over elx loopback header */
3046  rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3047  job->reply->reply_payload_rcv_len =
3048  sg_copy_from_buffer(job->reply_payload.sg_list,
3049  job->reply_payload.sg_cnt,
3050  rx_databuf, size);
3051  job->reply->reply_payload_rcv_len = size;
3052  }
3053  }
3054 
3055 err_loopback_test_exit:
3056  lpfcdiag_loop_self_unreg(phba, rpi);
3057 
3058  spin_lock_irqsave(&phba->ct_ev_lock, flags);
3059  lpfc_bsg_event_unref(evt); /* release ref */
3060  lpfc_bsg_event_unref(evt); /* delete */
3061  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3062 
3063  if (cmdiocbq != NULL)
3064  lpfc_sli_release_iocbq(phba, cmdiocbq);
3065 
3066  if (rspiocbq != NULL)
3067  lpfc_sli_release_iocbq(phba, rspiocbq);
3068 
3069  if (txbmp != NULL) {
3070  if (txbpl != NULL) {
3071  if (txbuffer != NULL)
3072  diag_cmd_data_free(phba, txbuffer);
3073  lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3074  }
3075  kfree(txbmp);
3076  }
3077 
3078 loopback_test_exit:
3079  kfree(dataout);
3080  /* make error code available to userspace */
3081  job->reply->result = rc;
3082  job->dd_data = NULL;
3083  /* complete the job back to userspace if no error */
3084  if (rc == IOCB_SUCCESS)
3085  job->job_done(job);
3086  return rc;
3087 }
3088 
3093 static int
3094 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3095 {
3096  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3097  struct lpfc_hba *phba = vport->phba;
3098  struct get_mgmt_rev *event_req;
3099  struct get_mgmt_rev_reply *event_reply;
3100  int rc = 0;
3101 
3102  if (job->request_len <
3103  sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3105  "2740 Received GET_DFC_REV request below "
3106  "minimum size\n");
3107  rc = -EINVAL;
3108  goto job_error;
3109  }
3110 
3111  event_req = (struct get_mgmt_rev *)
3112  job->request->rqst_data.h_vendor.vendor_cmd;
3113 
3114  event_reply = (struct get_mgmt_rev_reply *)
3115  job->reply->reply_data.vendor_reply.vendor_rsp;
3116 
3117  if (job->reply_len <
3118  sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3120  "2741 Received GET_DFC_REV reply below "
3121  "minimum size\n");
3122  rc = -EINVAL;
3123  goto job_error;
3124  }
3125 
3126  event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3127  event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3128 job_error:
3129  job->reply->result = rc;
3130  if (rc == 0)
3131  job->job_done(job);
3132  return rc;
3133 }
3134 
3146 void
3148 {
3149  struct bsg_job_data *dd_data;
3150  struct fc_bsg_job *job;
3151  uint32_t size;
3152  unsigned long flags;
3153  uint8_t *pmb, *pmb_buf;
3154 
3155  spin_lock_irqsave(&phba->ct_ev_lock, flags);
3156  dd_data = pmboxq->context1;
3157  /* job already timed out? */
3158  if (!dd_data) {
3159  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3160  return;
3161  }
3162 
3163  /*
3164  * The outgoing buffer is readily referred from the dma buffer,
3165  * just need to get header part from mailboxq structure.
3166  */
3167  pmb = (uint8_t *)&pmboxq->u.mb;
3168  pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3169  memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3170 
3171  job = dd_data->context_un.mbox.set_job;
3172  if (job) {
3173  size = job->reply_payload.payload_len;
3174  job->reply->reply_payload_rcv_len =
3175  sg_copy_from_buffer(job->reply_payload.sg_list,
3176  job->reply_payload.sg_cnt,
3177  pmb_buf, size);
3178  /* need to hold the lock until we set job->dd_data to NULL
3179  * to hold off the timeout handler returning to the mid-layer
3180  * while we are still processing the job.
3181  */
3182  job->dd_data = NULL;
3183  dd_data->context_un.mbox.set_job = NULL;
3184  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3185  } else {
3186  dd_data->context_un.mbox.set_job = NULL;
3187  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3188  }
3189 
3190  mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3191  lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3192  kfree(dd_data);
3193 
3194  if (job) {
3195  job->reply->result = 0;
3196  job->job_done(job);
3197  }
3198  return;
3199 }
3200 
3210 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3211  MAILBOX_t *mb, struct lpfc_vport *vport)
3212 {
3213  /* return negative error values for bsg job */
3214  switch (mb->mbxCommand) {
3215  /* Offline only */
3216  case MBX_INIT_LINK:
3217  case MBX_DOWN_LINK:
3218  case MBX_CONFIG_LINK:
3219  case MBX_CONFIG_RING:
3220  case MBX_RESET_RING:
3221  case MBX_UNREG_LOGIN:
3222  case MBX_CLEAR_LA:
3223  case MBX_DUMP_CONTEXT:
3224  case MBX_RUN_DIAGS:
3225  case MBX_RESTART:
3226  case MBX_SET_MASK:
3227  if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3229  "2743 Command 0x%x is illegal in on-line "
3230  "state\n",
3231  mb->mbxCommand);
3232  return -EPERM;
3233  }
3234  case MBX_WRITE_NV:
3235  case MBX_WRITE_VPARMS:
3236  case MBX_LOAD_SM:
3237  case MBX_READ_NV:
3238  case MBX_READ_CONFIG:
3239  case MBX_READ_RCONFIG:
3240  case MBX_READ_STATUS:
3241  case MBX_READ_XRI:
3242  case MBX_READ_REV:
3243  case MBX_READ_LNK_STAT:
3244  case MBX_DUMP_MEMORY:
3245  case MBX_DOWN_LOAD:
3246  case MBX_UPDATE_CFG:
3247  case MBX_KILL_BOARD:
3248  case MBX_LOAD_AREA:
3249  case MBX_LOAD_EXP_ROM:
3250  case MBX_BEACON:
3251  case MBX_DEL_LD_ENTRY:
3252  case MBX_SET_DEBUG:
3253  case MBX_WRITE_WWN:
3254  case MBX_SLI4_CONFIG:
3255  case MBX_READ_EVENT_LOG:
3257  case MBX_WRITE_EVENT_LOG:
3258  case MBX_PORT_CAPABILITIES:
3259  case MBX_PORT_IOV_CONTROL:
3260  case MBX_RUN_BIU_DIAG64:
3261  break;
3262  case MBX_SET_VARIABLE:
3264  "1226 mbox: set_variable 0x%x, 0x%x\n",
3265  mb->un.varWords[0],
3266  mb->un.varWords[1]);
3267  if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3268  && (mb->un.varWords[1] == 1)) {
3269  phba->wait_4_mlo_maint_flg = 1;
3270  } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3271  spin_lock_irq(&phba->hbalock);
3272  phba->link_flag &= ~LS_LOOPBACK_MODE;
3273  spin_unlock_irq(&phba->hbalock);
3275  }
3276  break;
3277  case MBX_READ_SPARM64:
3278  case MBX_READ_TOPOLOGY:
3279  case MBX_REG_LOGIN:
3280  case MBX_REG_LOGIN64:
3281  case MBX_CONFIG_PORT:
3282  case MBX_RUN_BIU_DIAG:
3283  default:
3285  "2742 Unknown Command 0x%x\n",
3286  mb->mbxCommand);
3287  return -EPERM;
3288  }
3289 
3290  return 0; /* ok */
3291 }
3292 
3300 static void
3301 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3302 {
3303  if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3304  return;
3305 
3306  /* free all memory, including dma buffers */
3307  lpfc_bsg_dma_page_list_free(phba,
3308  &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3309  lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3310  /* multi-buffer write mailbox command pass-through complete */
3311  memset((char *)&phba->mbox_ext_buf_ctx, 0,
3312  sizeof(struct lpfc_mbox_ext_buf_ctx));
3313  INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3314 
3315  return;
3316 }
3317 
3326 static struct fc_bsg_job *
3327 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3328 {
3329  struct bsg_job_data *dd_data;
3330  struct fc_bsg_job *job;
3331  uint8_t *pmb, *pmb_buf;
3332  unsigned long flags;
3333  uint32_t size;
3334  int rc = 0;
3335  struct lpfc_dmabuf *dmabuf;
3336  struct lpfc_sli_config_mbox *sli_cfg_mbx;
3337  uint8_t *pmbx;
3338 
3339  spin_lock_irqsave(&phba->ct_ev_lock, flags);
3340  dd_data = pmboxq->context1;
3341  /* has the job already timed out? */
3342  if (!dd_data) {
3343  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3344  job = NULL;
3345  goto job_done_out;
3346  }
3347 
3348  /*
3349  * The outgoing buffer is readily referred from the dma buffer,
3350  * just need to get header part from mailboxq structure.
3351  */
3352  pmb = (uint8_t *)&pmboxq->u.mb;
3353  pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3354  /* Copy the byte swapped response mailbox back to the user */
3355  memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3356  /* if there is any non-embedded extended data copy that too */
3357  dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3358  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3359  if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3360  &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3361  pmbx = (uint8_t *)dmabuf->virt;
3362  /* byte swap the extended data following the mailbox command */
3363  lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3364  &pmbx[sizeof(MAILBOX_t)],
3365  sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3366  }
3367 
3368  job = dd_data->context_un.mbox.set_job;
3369  if (job) {
3370  size = job->reply_payload.payload_len;
3371  job->reply->reply_payload_rcv_len =
3372  sg_copy_from_buffer(job->reply_payload.sg_list,
3373  job->reply_payload.sg_cnt,
3374  pmb_buf, size);
3375  /* result for successful */
3376  job->reply->result = 0;
3377  job->dd_data = NULL;
3378  /* need to hold the lock util we set job->dd_data to NULL
3379  * to hold off the timeout handler from midlayer to take
3380  * any action.
3381  */
3382  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3384  "2937 SLI_CONFIG ext-buffer maibox command "
3385  "(x%x/x%x) complete bsg job done, bsize:%d\n",
3386  phba->mbox_ext_buf_ctx.nembType,
3387  phba->mbox_ext_buf_ctx.mboxType, size);
3389  phba->mbox_ext_buf_ctx.nembType,
3390  phba->mbox_ext_buf_ctx.mboxType,
3392  phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3393  } else
3394  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3395 
3396 job_done_out:
3397  if (!job)
3399  "2938 SLI_CONFIG ext-buffer maibox "
3400  "command (x%x/x%x) failure, rc:x%x\n",
3401  phba->mbox_ext_buf_ctx.nembType,
3402  phba->mbox_ext_buf_ctx.mboxType, rc);
3403  /* state change */
3404  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3405  kfree(dd_data);
3406 
3407  return job;
3408 }
3409 
3418 static void
3419 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3420 {
3421  struct fc_bsg_job *job;
3422 
3423  /* handle the BSG job with mailbox command */
3424  if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3425  pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3426 
3428  "2939 SLI_CONFIG ext-buffer rd maibox command "
3429  "complete, ctxState:x%x, mbxStatus:x%x\n",
3430  phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3431 
3432  job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3433 
3434  if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3435  lpfc_bsg_mbox_ext_session_reset(phba);
3436 
3437  /* free base driver mailbox structure memory */
3438  mempool_free(pmboxq, phba->mbox_mem_pool);
3439 
3440  /* complete the bsg job if we have it */
3441  if (job)
3442  job->job_done(job);
3443 
3444  return;
3445 }
3446 
3455 static void
3456 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3457 {
3458  struct fc_bsg_job *job;
3459 
3460  /* handle the BSG job with the mailbox command */
3461  if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3462  pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3463 
3465  "2940 SLI_CONFIG ext-buffer wr maibox command "
3466  "complete, ctxState:x%x, mbxStatus:x%x\n",
3467  phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3468 
3469  job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3470 
3471  /* free all memory, including dma buffers */
3472  mempool_free(pmboxq, phba->mbox_mem_pool);
3473  lpfc_bsg_mbox_ext_session_reset(phba);
3474 
3475  /* complete the bsg job if we have it */
3476  if (job)
3477  job->job_done(job);
3478 
3479  return;
3480 }
3481 
3482 static void
3483 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3484  uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3485  struct lpfc_dmabuf *ext_dmabuf)
3486 {
3487  struct lpfc_sli_config_mbox *sli_cfg_mbx;
3488 
3489  /* pointer to the start of mailbox command */
3490  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3491 
3492  if (nemb_tp == nemb_mse) {
3493  if (index == 0) {
3494  sli_cfg_mbx->un.sli_config_emb0_subsys.
3495  mse[index].pa_hi =
3496  putPaddrHigh(mbx_dmabuf->phys +
3497  sizeof(MAILBOX_t));
3498  sli_cfg_mbx->un.sli_config_emb0_subsys.
3499  mse[index].pa_lo =
3500  putPaddrLow(mbx_dmabuf->phys +
3501  sizeof(MAILBOX_t));
3503  "2943 SLI_CONFIG(mse)[%d], "
3504  "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3505  index,
3506  sli_cfg_mbx->un.sli_config_emb0_subsys.
3507  mse[index].buf_len,
3508  sli_cfg_mbx->un.sli_config_emb0_subsys.
3509  mse[index].pa_hi,
3510  sli_cfg_mbx->un.sli_config_emb0_subsys.
3511  mse[index].pa_lo);
3512  } else {
3513  sli_cfg_mbx->un.sli_config_emb0_subsys.
3514  mse[index].pa_hi =
3515  putPaddrHigh(ext_dmabuf->phys);
3516  sli_cfg_mbx->un.sli_config_emb0_subsys.
3517  mse[index].pa_lo =
3518  putPaddrLow(ext_dmabuf->phys);
3520  "2944 SLI_CONFIG(mse)[%d], "
3521  "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3522  index,
3523  sli_cfg_mbx->un.sli_config_emb0_subsys.
3524  mse[index].buf_len,
3525  sli_cfg_mbx->un.sli_config_emb0_subsys.
3526  mse[index].pa_hi,
3527  sli_cfg_mbx->un.sli_config_emb0_subsys.
3528  mse[index].pa_lo);
3529  }
3530  } else {
3531  if (index == 0) {
3532  sli_cfg_mbx->un.sli_config_emb1_subsys.
3533  hbd[index].pa_hi =
3534  putPaddrHigh(mbx_dmabuf->phys +
3535  sizeof(MAILBOX_t));
3536  sli_cfg_mbx->un.sli_config_emb1_subsys.
3537  hbd[index].pa_lo =
3538  putPaddrLow(mbx_dmabuf->phys +
3539  sizeof(MAILBOX_t));
3541  "3007 SLI_CONFIG(hbd)[%d], "
3542  "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3543  index,
3544  bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3545  &sli_cfg_mbx->un.
3546  sli_config_emb1_subsys.hbd[index]),
3547  sli_cfg_mbx->un.sli_config_emb1_subsys.
3548  hbd[index].pa_hi,
3549  sli_cfg_mbx->un.sli_config_emb1_subsys.
3550  hbd[index].pa_lo);
3551 
3552  } else {
3553  sli_cfg_mbx->un.sli_config_emb1_subsys.
3554  hbd[index].pa_hi =
3555  putPaddrHigh(ext_dmabuf->phys);
3556  sli_cfg_mbx->un.sli_config_emb1_subsys.
3557  hbd[index].pa_lo =
3558  putPaddrLow(ext_dmabuf->phys);
3560  "3008 SLI_CONFIG(hbd)[%d], "
3561  "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3562  index,
3563  bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3564  &sli_cfg_mbx->un.
3565  sli_config_emb1_subsys.hbd[index]),
3566  sli_cfg_mbx->un.sli_config_emb1_subsys.
3567  hbd[index].pa_hi,
3568  sli_cfg_mbx->un.sli_config_emb1_subsys.
3569  hbd[index].pa_lo);
3570  }
3571  }
3572  return;
3573 }
3574 
3585 static int
3586 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3587  enum nemb_type nemb_tp,
3588  struct lpfc_dmabuf *dmabuf)
3589 {
3590  struct lpfc_sli_config_mbox *sli_cfg_mbx;
3591  struct dfc_mbox_req *mbox_req;
3592  struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3593  uint32_t ext_buf_cnt, ext_buf_index;
3594  struct lpfc_dmabuf *ext_dmabuf = NULL;
3595  struct bsg_job_data *dd_data = NULL;
3596  LPFC_MBOXQ_t *pmboxq = NULL;
3597  MAILBOX_t *pmb;
3598  uint8_t *pmbx;
3599  int rc, i;
3600 
3601  mbox_req =
3602  (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3603 
3604  /* pointer to the start of mailbox command */
3605  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3606 
3607  if (nemb_tp == nemb_mse) {
3608  ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3609  &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3610  if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3612  "2945 Handled SLI_CONFIG(mse) rd, "
3613  "ext_buf_cnt(%d) out of range(%d)\n",
3614  ext_buf_cnt,
3616  rc = -ERANGE;
3617  goto job_error;
3618  }
3620  "2941 Handled SLI_CONFIG(mse) rd, "
3621  "ext_buf_cnt:%d\n", ext_buf_cnt);
3622  } else {
3623  /* sanity check on interface type for support */
3624  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3626  rc = -ENODEV;
3627  goto job_error;
3628  }
3629  /* nemb_tp == nemb_hbd */
3630  ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3631  if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3633  "2946 Handled SLI_CONFIG(hbd) rd, "
3634  "ext_buf_cnt(%d) out of range(%d)\n",
3635  ext_buf_cnt,
3637  rc = -ERANGE;
3638  goto job_error;
3639  }
3641  "2942 Handled SLI_CONFIG(hbd) rd, "
3642  "ext_buf_cnt:%d\n", ext_buf_cnt);
3643  }
3644 
3645  /* before dma descriptor setup */
3647  sta_pre_addr, dmabuf, ext_buf_cnt);
3648 
3649  /* reject non-embedded mailbox command with none external buffer */
3650  if (ext_buf_cnt == 0) {
3651  rc = -EPERM;
3652  goto job_error;
3653  } else if (ext_buf_cnt > 1) {
3654  /* additional external read buffers */
3655  for (i = 1; i < ext_buf_cnt; i++) {
3656  ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3657  if (!ext_dmabuf) {
3658  rc = -ENOMEM;
3659  goto job_error;
3660  }
3661  list_add_tail(&ext_dmabuf->list,
3662  &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3663  }
3664  }
3665 
3666  /* bsg tracking structure */
3667  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3668  if (!dd_data) {
3669  rc = -ENOMEM;
3670  goto job_error;
3671  }
3672 
3673  /* mailbox command structure for base driver */
3674  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3675  if (!pmboxq) {
3676  rc = -ENOMEM;
3677  goto job_error;
3678  }
3679  memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3680 
3681  /* for the first external buffer */
3682  lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3683 
3684  /* for the rest of external buffer descriptors if any */
3685  if (ext_buf_cnt > 1) {
3686  ext_buf_index = 1;
3687  list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3688  &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3689  lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3690  ext_buf_index, dmabuf,
3691  curr_dmabuf);
3692  ext_buf_index++;
3693  }
3694  }
3695 
3696  /* after dma descriptor setup */
3698  sta_pos_addr, dmabuf, ext_buf_cnt);
3699 
3700  /* construct base driver mbox command */
3701  pmb = &pmboxq->u.mb;
3702  pmbx = (uint8_t *)dmabuf->virt;
3703  memcpy(pmb, pmbx, sizeof(*pmb));
3704  pmb->mbxOwner = OWN_HOST;
3705  pmboxq->vport = phba->pport;
3706 
3707  /* multi-buffer handling context */
3708  phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3709  phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3710  phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3711  phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3712  phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3713  phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3714 
3715  /* callback for multi-buffer read mailbox command */
3716  pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3717 
3718  /* context fields to callback function */
3719  pmboxq->context1 = dd_data;
3720  dd_data->type = TYPE_MBOX;
3721  dd_data->context_un.mbox.pmboxq = pmboxq;
3722  dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3723  dd_data->context_un.mbox.set_job = job;
3724  job->dd_data = dd_data;
3725 
3726  /* state change */
3727  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3728 
3729  /*
3730  * Non-embedded mailbox subcommand data gets byte swapped here because
3731  * the lower level driver code only does the first 64 mailbox words.
3732  */
3733  if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3734  &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3735  (nemb_tp == nemb_mse))
3736  lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3737  &pmbx[sizeof(MAILBOX_t)],
3738  sli_cfg_mbx->un.sli_config_emb0_subsys.
3739  mse[0].buf_len);
3740 
3741  rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3742  if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3744  "2947 Issued SLI_CONFIG ext-buffer "
3745  "maibox command, rc:x%x\n", rc);
3746  return SLI_CONFIG_HANDLED;
3747  }
3749  "2948 Failed to issue SLI_CONFIG ext-buffer "
3750  "maibox command, rc:x%x\n", rc);
3751  rc = -EPIPE;
3752 
3753 job_error:
3754  if (pmboxq)
3755  mempool_free(pmboxq, phba->mbox_mem_pool);
3756  lpfc_bsg_dma_page_list_free(phba,
3757  &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3758  kfree(dd_data);
3759  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3760  return rc;
3761 }
3762 
3772 static int
3773 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3774  enum nemb_type nemb_tp,
3775  struct lpfc_dmabuf *dmabuf)
3776 {
3777  struct dfc_mbox_req *mbox_req;
3778  struct lpfc_sli_config_mbox *sli_cfg_mbx;
3779  uint32_t ext_buf_cnt;
3780  struct bsg_job_data *dd_data = NULL;
3781  LPFC_MBOXQ_t *pmboxq = NULL;
3782  MAILBOX_t *pmb;
3783  uint8_t *mbx;
3784  int rc = SLI_CONFIG_NOT_HANDLED, i;
3785 
3786  mbox_req =
3787  (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3788 
3789  /* pointer to the start of mailbox command */
3790  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3791 
3792  if (nemb_tp == nemb_mse) {
3793  ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3794  &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3795  if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3797  "2953 Failed SLI_CONFIG(mse) wr, "
3798  "ext_buf_cnt(%d) out of range(%d)\n",
3799  ext_buf_cnt,
3801  return -ERANGE;
3802  }
3804  "2949 Handled SLI_CONFIG(mse) wr, "
3805  "ext_buf_cnt:%d\n", ext_buf_cnt);
3806  } else {
3807  /* sanity check on interface type for support */
3808  if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3810  return -ENODEV;
3811  /* nemb_tp == nemb_hbd */
3812  ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3813  if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3815  "2954 Failed SLI_CONFIG(hbd) wr, "
3816  "ext_buf_cnt(%d) out of range(%d)\n",
3817  ext_buf_cnt,
3819  return -ERANGE;
3820  }
3822  "2950 Handled SLI_CONFIG(hbd) wr, "
3823  "ext_buf_cnt:%d\n", ext_buf_cnt);
3824  }
3825 
3826  /* before dma buffer descriptor setup */
3828  sta_pre_addr, dmabuf, ext_buf_cnt);
3829 
3830  if (ext_buf_cnt == 0)
3831  return -EPERM;
3832 
3833  /* for the first external buffer */
3834  lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3835 
3836  /* after dma descriptor setup */
3838  sta_pos_addr, dmabuf, ext_buf_cnt);
3839 
3840  /* log for looking forward */
3841  for (i = 1; i < ext_buf_cnt; i++) {
3842  if (nemb_tp == nemb_mse)
3844  "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3845  i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3846  mse[i].buf_len);
3847  else
3849  "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3850  i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3851  &sli_cfg_mbx->un.sli_config_emb1_subsys.
3852  hbd[i]));
3853  }
3854 
3855  /* multi-buffer handling context */
3856  phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3857  phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3858  phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3859  phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3860  phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3861  phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3862 
3863  if (ext_buf_cnt == 1) {
3864  /* bsg tracking structure */
3865  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3866  if (!dd_data) {
3867  rc = -ENOMEM;
3868  goto job_error;
3869  }
3870 
3871  /* mailbox command structure for base driver */
3872  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3873  if (!pmboxq) {
3874  rc = -ENOMEM;
3875  goto job_error;
3876  }
3877  memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3878  pmb = &pmboxq->u.mb;
3879  mbx = (uint8_t *)dmabuf->virt;
3880  memcpy(pmb, mbx, sizeof(*pmb));
3881  pmb->mbxOwner = OWN_HOST;
3882  pmboxq->vport = phba->pport;
3883 
3884  /* callback for multi-buffer read mailbox command */
3885  pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3886 
3887  /* context fields to callback function */
3888  pmboxq->context1 = dd_data;
3889  dd_data->type = TYPE_MBOX;
3890  dd_data->context_un.mbox.pmboxq = pmboxq;
3891  dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3892  dd_data->context_un.mbox.set_job = job;
3893  job->dd_data = dd_data;
3894 
3895  /* state change */
3896  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3897 
3898  rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3899  if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3901  "2955 Issued SLI_CONFIG ext-buffer "
3902  "maibox command, rc:x%x\n", rc);
3903  return SLI_CONFIG_HANDLED;
3904  }
3906  "2956 Failed to issue SLI_CONFIG ext-buffer "
3907  "maibox command, rc:x%x\n", rc);
3908  rc = -EPIPE;
3909  goto job_error;
3910  }
3911 
3912  /* wait for additoinal external buffers */
3913  job->reply->result = 0;
3914  job->job_done(job);
3915  return SLI_CONFIG_HANDLED;
3916 
3917 job_error:
3918  if (pmboxq)
3919  mempool_free(pmboxq, phba->mbox_mem_pool);
3920  kfree(dd_data);
3921 
3922  return rc;
3923 }
3924 
3935 static int
3936 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3937  struct lpfc_dmabuf *dmabuf)
3938 {
3939  struct lpfc_sli_config_mbox *sli_cfg_mbx;
3940  uint32_t subsys;
3941  uint32_t opcode;
3942  int rc = SLI_CONFIG_NOT_HANDLED;
3943 
3944  /* state change on new multi-buffer pass-through mailbox command */
3945  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3946 
3947  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3948 
3949  if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3950  &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3951  subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3952  &sli_cfg_mbx->un.sli_config_emb0_subsys);
3953  opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3954  &sli_cfg_mbx->un.sli_config_emb0_subsys);
3955  if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3956  switch (opcode) {
3957  case FCOE_OPCODE_READ_FCF:
3959  "2957 Handled SLI_CONFIG "
3960  "subsys_fcoe, opcode:x%x\n",
3961  opcode);
3962  rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3963  nemb_mse, dmabuf);
3964  break;
3965  case FCOE_OPCODE_ADD_FCF:
3967  "2958 Handled SLI_CONFIG "
3968  "subsys_fcoe, opcode:x%x\n",
3969  opcode);
3970  rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3971  nemb_mse, dmabuf);
3972  break;
3973  default:
3975  "2959 Reject SLI_CONFIG "
3976  "subsys_fcoe, opcode:x%x\n",
3977  opcode);
3978  rc = -EPERM;
3979  break;
3980  }
3981  } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3982  switch (opcode) {
3986  "3106 Handled SLI_CONFIG "
3987  "subsys_comn, opcode:x%x\n",
3988  opcode);
3989  rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3990  nemb_mse, dmabuf);
3991  break;
3992  default:
3994  "3107 Reject SLI_CONFIG "
3995  "subsys_comn, opcode:x%x\n",
3996  opcode);
3997  rc = -EPERM;
3998  break;
3999  }
4000  } else {
4002  "2977 Reject SLI_CONFIG "
4003  "subsys:x%d, opcode:x%x\n",
4004  subsys, opcode);
4005  rc = -EPERM;
4006  }
4007  } else {
4008  subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4009  &sli_cfg_mbx->un.sli_config_emb1_subsys);
4010  opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4011  &sli_cfg_mbx->un.sli_config_emb1_subsys);
4012  if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4013  switch (opcode) {
4017  "2960 Handled SLI_CONFIG "
4018  "subsys_comn, opcode:x%x\n",
4019  opcode);
4020  rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4021  nemb_hbd, dmabuf);
4022  break;
4025  "2961 Handled SLI_CONFIG "
4026  "subsys_comn, opcode:x%x\n",
4027  opcode);
4028  rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4029  nemb_hbd, dmabuf);
4030  break;
4031  default:
4033  "2962 Not handled SLI_CONFIG "
4034  "subsys_comn, opcode:x%x\n",
4035  opcode);
4037  break;
4038  }
4039  } else {
4041  "2978 Not handled SLI_CONFIG "
4042  "subsys:x%d, opcode:x%x\n",
4043  subsys, opcode);
4045  }
4046  }
4047 
4048  /* state reset on not handled new multi-buffer mailbox command */
4049  if (rc != SLI_CONFIG_HANDLED)
4050  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4051 
4052  return rc;
4053 }
4054 
4062 static void
4063 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4064 {
4065  if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4066  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4067  else
4068  lpfc_bsg_mbox_ext_session_reset(phba);
4069  return;
4070 }
4071 
4080 static int
4081 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4082 {
4083  struct lpfc_sli_config_mbox *sli_cfg_mbx;
4084  struct lpfc_dmabuf *dmabuf;
4085  uint8_t *pbuf;
4086  uint32_t size;
4087  uint32_t index;
4088 
4089  index = phba->mbox_ext_buf_ctx.seqNum;
4090  phba->mbox_ext_buf_ctx.seqNum++;
4091 
4092  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4093  phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4094 
4095  if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4096  size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4097  &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4099  "2963 SLI_CONFIG (mse) ext-buffer rd get "
4100  "buffer[%d], size:%d\n", index, size);
4101  } else {
4102  size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4103  &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4105  "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4106  "buffer[%d], size:%d\n", index, size);
4107  }
4108  if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4109  return -EPIPE;
4110  dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4111  struct lpfc_dmabuf, list);
4112  list_del_init(&dmabuf->list);
4113 
4114  /* after dma buffer descriptor setup */
4117  dmabuf, index);
4118 
4119  pbuf = (uint8_t *)dmabuf->virt;
4120  job->reply->reply_payload_rcv_len =
4121  sg_copy_from_buffer(job->reply_payload.sg_list,
4122  job->reply_payload.sg_cnt,
4123  pbuf, size);
4124 
4125  lpfc_bsg_dma_page_free(phba, dmabuf);
4126 
4127  if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4129  "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4130  "command session done\n");
4131  lpfc_bsg_mbox_ext_session_reset(phba);
4132  }
4133 
4134  job->reply->result = 0;
4135  job->job_done(job);
4136 
4137  return SLI_CONFIG_HANDLED;
4138 }
4139 
4148 static int
4149 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4150  struct lpfc_dmabuf *dmabuf)
4151 {
4152  struct lpfc_sli_config_mbox *sli_cfg_mbx;
4153  struct bsg_job_data *dd_data = NULL;
4154  LPFC_MBOXQ_t *pmboxq = NULL;
4155  MAILBOX_t *pmb;
4156  enum nemb_type nemb_tp;
4157  uint8_t *pbuf;
4158  uint32_t size;
4159  uint32_t index;
4160  int rc;
4161 
4162  index = phba->mbox_ext_buf_ctx.seqNum;
4163  phba->mbox_ext_buf_ctx.seqNum++;
4164  nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4165 
4166  sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4167  phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4168 
4169  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4170  if (!dd_data) {
4171  rc = -ENOMEM;
4172  goto job_error;
4173  }
4174 
4175  pbuf = (uint8_t *)dmabuf->virt;
4176  size = job->request_payload.payload_len;
4177  sg_copy_to_buffer(job->request_payload.sg_list,
4178  job->request_payload.sg_cnt,
4179  pbuf, size);
4180 
4181  if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4183  "2966 SLI_CONFIG (mse) ext-buffer wr set "
4184  "buffer[%d], size:%d\n",
4185  phba->mbox_ext_buf_ctx.seqNum, size);
4186 
4187  } else {
4189  "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4190  "buffer[%d], size:%d\n",
4191  phba->mbox_ext_buf_ctx.seqNum, size);
4192 
4193  }
4194 
4195  /* set up external buffer descriptor and add to external buffer list */
4196  lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4197  phba->mbox_ext_buf_ctx.mbx_dmabuf,
4198  dmabuf);
4199  list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4200 
4201  /* after write dma buffer */
4204  dmabuf, index);
4205 
4206  if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4208  "2968 SLI_CONFIG ext-buffer wr all %d "
4209  "ebuffers received\n",
4210  phba->mbox_ext_buf_ctx.numBuf);
4211  /* mailbox command structure for base driver */
4212  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4213  if (!pmboxq) {
4214  rc = -ENOMEM;
4215  goto job_error;
4216  }
4217  memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4218  pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4219  pmb = &pmboxq->u.mb;
4220  memcpy(pmb, pbuf, sizeof(*pmb));
4221  pmb->mbxOwner = OWN_HOST;
4222  pmboxq->vport = phba->pport;
4223 
4224  /* callback for multi-buffer write mailbox command */
4225  pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4226 
4227  /* context fields to callback function */
4228  pmboxq->context1 = dd_data;
4229  dd_data->type = TYPE_MBOX;
4230  dd_data->context_un.mbox.pmboxq = pmboxq;
4231  dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4232  dd_data->context_un.mbox.set_job = job;
4233  job->dd_data = dd_data;
4234 
4235  /* state change */
4236  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4237 
4238  rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4239  if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4241  "2969 Issued SLI_CONFIG ext-buffer "
4242  "maibox command, rc:x%x\n", rc);
4243  return SLI_CONFIG_HANDLED;
4244  }
4246  "2970 Failed to issue SLI_CONFIG ext-buffer "
4247  "maibox command, rc:x%x\n", rc);
4248  rc = -EPIPE;
4249  goto job_error;
4250  }
4251 
4252  /* wait for additoinal external buffers */
4253  job->reply->result = 0;
4254  job->job_done(job);
4255  return SLI_CONFIG_HANDLED;
4256 
4257 job_error:
4258  lpfc_bsg_dma_page_free(phba, dmabuf);
4259  kfree(dd_data);
4260 
4261  return rc;
4262 }
4263 
4273 static int
4274 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
4275  struct lpfc_dmabuf *dmabuf)
4276 {
4277  int rc;
4278 
4280  "2971 SLI_CONFIG buffer (type:x%x)\n",
4281  phba->mbox_ext_buf_ctx.mboxType);
4282 
4283  if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4284  if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4286  "2972 SLI_CONFIG rd buffer state "
4287  "mismatch:x%x\n",
4288  phba->mbox_ext_buf_ctx.state);
4289  lpfc_bsg_mbox_ext_abort(phba);
4290  return -EPIPE;
4291  }
4292  rc = lpfc_bsg_read_ebuf_get(phba, job);
4293  if (rc == SLI_CONFIG_HANDLED)
4294  lpfc_bsg_dma_page_free(phba, dmabuf);
4295  } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4296  if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4298  "2973 SLI_CONFIG wr buffer state "
4299  "mismatch:x%x\n",
4300  phba->mbox_ext_buf_ctx.state);
4301  lpfc_bsg_mbox_ext_abort(phba);
4302  return -EPIPE;
4303  }
4304  rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4305  }
4306  return rc;
4307 }
4308 
4318 static int
4319 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4320  struct lpfc_dmabuf *dmabuf)
4321 {
4322  struct dfc_mbox_req *mbox_req;
4323  int rc = SLI_CONFIG_NOT_HANDLED;
4324 
4325  mbox_req =
4326  (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4327 
4328  /* mbox command with/without single external buffer */
4329  if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4330  return rc;
4331 
4332  /* mbox command and first external buffer */
4333  if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4334  if (mbox_req->extSeqNum == 1) {
4336  "2974 SLI_CONFIG mailbox: tag:%d, "
4337  "seq:%d\n", mbox_req->extMboxTag,
4338  mbox_req->extSeqNum);
4339  rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4340  return rc;
4341  } else
4342  goto sli_cfg_ext_error;
4343  }
4344 
4345  /*
4346  * handle additional external buffers
4347  */
4348 
4349  /* check broken pipe conditions */
4350  if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4351  goto sli_cfg_ext_error;
4352  if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4353  goto sli_cfg_ext_error;
4354  if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4355  goto sli_cfg_ext_error;
4356 
4358  "2975 SLI_CONFIG mailbox external buffer: "
4359  "extSta:x%x, tag:%d, seq:%d\n",
4360  phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4361  mbox_req->extSeqNum);
4362  rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4363  return rc;
4364 
4365 sli_cfg_ext_error:
4366  /* all other cases, broken pipe */
4368  "2976 SLI_CONFIG mailbox broken pipe: "
4369  "ctxSta:x%x, ctxNumBuf:%d "
4370  "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4371  phba->mbox_ext_buf_ctx.state,
4372  phba->mbox_ext_buf_ctx.numBuf,
4373  phba->mbox_ext_buf_ctx.mbxTag,
4374  phba->mbox_ext_buf_ctx.seqNum,
4375  mbox_req->extMboxTag, mbox_req->extSeqNum);
4376 
4377  lpfc_bsg_mbox_ext_session_reset(phba);
4378 
4379  return -EPIPE;
4380 }
4381 
4395 static uint32_t
4396 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4397  struct lpfc_vport *vport)
4398 {
4399  LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4400  MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4401  /* a 4k buffer to hold the mb and extended data from/to the bsg */
4402  uint8_t *pmbx = NULL;
4403  struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4404  struct lpfc_dmabuf *dmabuf = NULL;
4405  struct dfc_mbox_req *mbox_req;
4406  struct READ_EVENT_LOG_VAR *rdEventLog;
4407  uint32_t transmit_length, receive_length, mode;
4408  struct lpfc_mbx_sli4_config *sli4_config;
4409  struct lpfc_mbx_nembed_cmd *nembed_sge;
4410  struct mbox_header *header;
4411  struct ulp_bde64 *bde;
4412  uint8_t *ext = NULL;
4413  int rc = 0;
4414  uint8_t *from;
4415  uint32_t size;
4416 
4417 
4418  /* in case no data is transferred */
4419  job->reply->reply_payload_rcv_len = 0;
4420 
4421  /* sanity check to protect driver */
4422  if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4423  job->request_payload.payload_len > BSG_MBOX_SIZE) {
4424  rc = -ERANGE;
4425  goto job_done;
4426  }
4427 
4428  /*
4429  * Don't allow mailbox commands to be sent when blocked or when in
4430  * the middle of discovery
4431  */
4432  if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4433  rc = -EAGAIN;
4434  goto job_done;
4435  }
4436 
4437  mbox_req =
4438  (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4439 
4440  /* check if requested extended data lengths are valid */
4441  if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4442  (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4443  rc = -ERANGE;
4444  goto job_done;
4445  }
4446 
4447  dmabuf = lpfc_bsg_dma_page_alloc(phba);
4448  if (!dmabuf || !dmabuf->virt) {
4449  rc = -ENOMEM;
4450  goto job_done;
4451  }
4452 
4453  /* Get the mailbox command or external buffer from BSG */
4454  pmbx = (uint8_t *)dmabuf->virt;
4455  size = job->request_payload.payload_len;
4456  sg_copy_to_buffer(job->request_payload.sg_list,
4457  job->request_payload.sg_cnt, pmbx, size);
4458 
4459  /* Handle possible SLI_CONFIG with non-embedded payloads */
4460  if (phba->sli_rev == LPFC_SLI_REV4) {
4461  rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4462  if (rc == SLI_CONFIG_HANDLED)
4463  goto job_cont;
4464  if (rc)
4465  goto job_done;
4466  /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4467  }
4468 
4469  rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4470  if (rc != 0)
4471  goto job_done; /* must be negative */
4472 
4473  /* allocate our bsg tracking structure */
4474  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4475  if (!dd_data) {
4477  "2727 Failed allocation of dd_data\n");
4478  rc = -ENOMEM;
4479  goto job_done;
4480  }
4481 
4482  pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4483  if (!pmboxq) {
4484  rc = -ENOMEM;
4485  goto job_done;
4486  }
4487  memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4488 
4489  pmb = &pmboxq->u.mb;
4490  memcpy(pmb, pmbx, sizeof(*pmb));
4491  pmb->mbxOwner = OWN_HOST;
4492  pmboxq->vport = vport;
4493 
4494  /* If HBA encountered an error attention, allow only DUMP
4495  * or RESTART mailbox commands until the HBA is restarted.
4496  */
4497  if (phba->pport->stopped &&
4498  pmb->mbxCommand != MBX_DUMP_MEMORY &&
4499  pmb->mbxCommand != MBX_RESTART &&
4500  pmb->mbxCommand != MBX_WRITE_VPARMS &&
4501  pmb->mbxCommand != MBX_WRITE_WWN)
4503  "2797 mbox: Issued mailbox cmd "
4504  "0x%x while in stopped state.\n",
4505  pmb->mbxCommand);
4506 
4507  /* extended mailbox commands will need an extended buffer */
4508  if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4509  from = pmbx;
4510  ext = from + sizeof(MAILBOX_t);
4511  pmboxq->context2 = ext;
4512  pmboxq->in_ext_byte_len =
4513  mbox_req->inExtWLen * sizeof(uint32_t);
4514  pmboxq->out_ext_byte_len =
4515  mbox_req->outExtWLen * sizeof(uint32_t);
4516  pmboxq->mbox_offset_word = mbox_req->mbOffset;
4517  }
4518 
4519  /* biu diag will need a kernel buffer to transfer the data
4520  * allocate our own buffer and setup the mailbox command to
4521  * use ours
4522  */
4523  if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4524  transmit_length = pmb->un.varWords[1];
4525  receive_length = pmb->un.varWords[4];
4526  /* transmit length cannot be greater than receive length or
4527  * mailbox extension size
4528  */
4529  if ((transmit_length > receive_length) ||
4530  (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4531  rc = -ERANGE;
4532  goto job_done;
4533  }
4534  pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4535  putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4536  pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4537  putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4538 
4539  pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4540  putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4541  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4542  pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4543  putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4544  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4545  } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4546  rdEventLog = &pmb->un.varRdEventLog;
4547  receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4548  mode = bf_get(lpfc_event_log, rdEventLog);
4549 
4550  /* receive length cannot be greater than mailbox
4551  * extension size
4552  */
4553  if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4554  rc = -ERANGE;
4555  goto job_done;
4556  }
4557 
4558  /* mode zero uses a bde like biu diags command */
4559  if (mode == 0) {
4560  pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4561  + sizeof(MAILBOX_t));
4562  pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4563  + sizeof(MAILBOX_t));
4564  }
4565  } else if (phba->sli_rev == LPFC_SLI_REV4) {
4566  /* Let type 4 (well known data) through because the data is
4567  * returned in varwords[4-8]
4568  * otherwise check the recieve length and fetch the buffer addr
4569  */
4570  if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4571  (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4572  /* rebuild the command for sli4 using our own buffers
4573  * like we do for biu diags
4574  */
4575  receive_length = pmb->un.varWords[2];
4576  /* receive length cannot be greater than mailbox
4577  * extension size
4578  */
4579  if (receive_length == 0) {
4580  rc = -ERANGE;
4581  goto job_done;
4582  }
4583  pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4584  + sizeof(MAILBOX_t));
4585  pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4586  + sizeof(MAILBOX_t));
4587  } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4588  pmb->un.varUpdateCfg.co) {
4589  bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4590 
4591  /* bde size cannot be greater than mailbox ext size */
4592  if (bde->tus.f.bdeSize >
4593  BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4594  rc = -ERANGE;
4595  goto job_done;
4596  }
4597  bde->addrHigh = putPaddrHigh(dmabuf->phys
4598  + sizeof(MAILBOX_t));
4599  bde->addrLow = putPaddrLow(dmabuf->phys
4600  + sizeof(MAILBOX_t));
4601  } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4602  /* Handling non-embedded SLI_CONFIG mailbox command */
4603  sli4_config = &pmboxq->u.mqe.un.sli4_config;
4604  if (!bf_get(lpfc_mbox_hdr_emb,
4605  &sli4_config->header.cfg_mhdr)) {
4606  /* rebuild the command for sli4 using our
4607  * own buffers like we do for biu diags
4608  */
4609  header = (struct mbox_header *)
4610  &pmb->un.varWords[0];
4611  nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4612  &pmb->un.varWords[0];
4613  receive_length = nembed_sge->sge[0].length;
4614 
4615  /* receive length cannot be greater than
4616  * mailbox extension size
4617  */
4618  if ((receive_length == 0) ||
4619  (receive_length >
4620  BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4621  rc = -ERANGE;
4622  goto job_done;
4623  }
4624 
4625  nembed_sge->sge[0].pa_hi =
4626  putPaddrHigh(dmabuf->phys
4627  + sizeof(MAILBOX_t));
4628  nembed_sge->sge[0].pa_lo =
4629  putPaddrLow(dmabuf->phys
4630  + sizeof(MAILBOX_t));
4631  }
4632  }
4633  }
4634 
4635  dd_data->context_un.mbox.dmabuffers = dmabuf;
4636 
4637  /* setup wake call as IOCB callback */
4639 
4640  /* setup context field to pass wait_queue pointer to wake function */
4641  pmboxq->context1 = dd_data;
4642  dd_data->type = TYPE_MBOX;
4643  dd_data->context_un.mbox.pmboxq = pmboxq;
4644  dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4645  dd_data->context_un.mbox.set_job = job;
4646  dd_data->context_un.mbox.ext = ext;
4647  dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4648  dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4649  dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4650  job->dd_data = dd_data;
4651 
4652  if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4653  (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4654  rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4655  if (rc != MBX_SUCCESS) {
4656  rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4657  goto job_done;
4658  }
4659 
4660  /* job finished, copy the data */
4661  memcpy(pmbx, pmb, sizeof(*pmb));
4662  job->reply->reply_payload_rcv_len =
4663  sg_copy_from_buffer(job->reply_payload.sg_list,
4664  job->reply_payload.sg_cnt,
4665  pmbx, size);
4666  /* not waiting mbox already done */
4667  rc = 0;
4668  goto job_done;
4669  }
4670 
4671  rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4672  if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4673  return 1; /* job started */
4674 
4675 job_done:
4676  /* common exit for error or job completed inline */
4677  if (pmboxq)
4678  mempool_free(pmboxq, phba->mbox_mem_pool);
4679  lpfc_bsg_dma_page_free(phba, dmabuf);
4680  kfree(dd_data);
4681 
4682 job_cont:
4683  return rc;
4684 }
4685 
4690 static int
4691 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4692 {
4693  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4694  struct lpfc_hba *phba = vport->phba;
4695  struct dfc_mbox_req *mbox_req;
4696  int rc = 0;
4697 
4698  /* mix-and-match backward compatibility */
4699  job->reply->reply_payload_rcv_len = 0;
4700  if (job->request_len <
4701  sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4703  "2737 Mix-and-match backward compability "
4704  "between MBOX_REQ old size:%d and "
4705  "new request size:%d\n",
4706  (int)(job->request_len -
4707  sizeof(struct fc_bsg_request)),
4708  (int)sizeof(struct dfc_mbox_req));
4709  mbox_req = (struct dfc_mbox_req *)
4710  job->request->rqst_data.h_vendor.vendor_cmd;
4711  mbox_req->extMboxTag = 0;
4712  mbox_req->extSeqNum = 0;
4713  }
4714 
4715  rc = lpfc_bsg_issue_mbox(phba, job, vport);
4716 
4717  if (rc == 0) {
4718  /* job done */
4719  job->reply->result = 0;
4720  job->dd_data = NULL;
4721  job->job_done(job);
4722  } else if (rc == 1)
4723  /* job submitted, will complete later*/
4724  rc = 0; /* return zero, no error */
4725  else {
4726  /* some error occurred */
4727  job->reply->result = rc;
4728  job->dd_data = NULL;
4729  }
4730 
4731  return rc;
4732 }
4733 
4751 static void
4752 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4753  struct lpfc_iocbq *cmdiocbq,
4754  struct lpfc_iocbq *rspiocbq)
4755 {
4756  struct bsg_job_data *dd_data;
4757  struct fc_bsg_job *job;
4758  IOCB_t *rsp;
4759  struct lpfc_dmabuf *bmp;
4760  struct lpfc_bsg_menlo *menlo;
4761  unsigned long flags;
4762  struct menlo_response *menlo_resp;
4763  int rc = 0;
4764 
4765  spin_lock_irqsave(&phba->ct_ev_lock, flags);
4766  dd_data = cmdiocbq->context1;
4767  if (!dd_data) {
4768  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4769  return;
4770  }
4771 
4772  menlo = &dd_data->context_un.menlo;
4773  job = menlo->set_job;
4774  job->dd_data = NULL; /* so timeout handler does not reply */
4775 
4776  spin_lock(&phba->hbalock);
4777  cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4778  if (cmdiocbq->context2 && rspiocbq)
4779  memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4780  &rspiocbq->iocb, sizeof(IOCB_t));
4781  spin_unlock(&phba->hbalock);
4782 
4783  bmp = menlo->bmp;
4784  rspiocbq = menlo->rspiocbq;
4785  rsp = &rspiocbq->iocb;
4786 
4787  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4788  job->request_payload.sg_cnt, DMA_TO_DEVICE);
4789  pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4790  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4791 
4792  /* always return the xri, this would be used in the case
4793  * of a menlo download to allow the data to be sent as a continuation
4794  * of the exchange.
4795  */
4796  menlo_resp = (struct menlo_response *)
4797  job->reply->reply_data.vendor_reply.vendor_rsp;
4798  menlo_resp->xri = rsp->ulpContext;
4799  if (rsp->ulpStatus) {
4800  if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4801  switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4803  rc = -ETIMEDOUT;
4804  break;
4805  case IOERR_INVALID_RPI:
4806  rc = -EFAULT;
4807  break;
4808  default:
4809  rc = -EACCES;
4810  break;
4811  }
4812  } else
4813  rc = -EACCES;
4814  } else
4815  job->reply->reply_payload_rcv_len =
4816  rsp->un.genreq64.bdl.bdeSize;
4817 
4818  lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4819  lpfc_sli_release_iocbq(phba, rspiocbq);
4820  lpfc_sli_release_iocbq(phba, cmdiocbq);
4821  kfree(bmp);
4822  kfree(dd_data);
4823  /* make error code available to userspace */
4824  job->reply->result = rc;
4825  /* complete the job back to userspace */
4826  job->job_done(job);
4827  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4828  return;
4829 }
4830 
4840 static int
4841 lpfc_menlo_cmd(struct fc_bsg_job *job)
4842 {
4843  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4844  struct lpfc_hba *phba = vport->phba;
4845  struct lpfc_iocbq *cmdiocbq, *rspiocbq;
4846  IOCB_t *cmd, *rsp;
4847  int rc = 0;
4848  struct menlo_command *menlo_cmd;
4849  struct menlo_response *menlo_resp;
4850  struct lpfc_dmabuf *bmp = NULL;
4851  int request_nseg;
4852  int reply_nseg;
4853  struct scatterlist *sgel = NULL;
4854  int numbde;
4855  dma_addr_t busaddr;
4856  struct bsg_job_data *dd_data;
4857  struct ulp_bde64 *bpl = NULL;
4858 
4859  /* in case no data is returned return just the return code */
4860  job->reply->reply_payload_rcv_len = 0;
4861 
4862  if (job->request_len <
4863  sizeof(struct fc_bsg_request) +
4864  sizeof(struct menlo_command)) {
4866  "2784 Received MENLO_CMD request below "
4867  "minimum size\n");
4868  rc = -ERANGE;
4869  goto no_dd_data;
4870  }
4871 
4872  if (job->reply_len <
4873  sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
4875  "2785 Received MENLO_CMD reply below "
4876  "minimum size\n");
4877  rc = -ERANGE;
4878  goto no_dd_data;
4879  }
4880 
4881  if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
4883  "2786 Adapter does not support menlo "
4884  "commands\n");
4885  rc = -EPERM;
4886  goto no_dd_data;
4887  }
4888 
4889  menlo_cmd = (struct menlo_command *)
4890  job->request->rqst_data.h_vendor.vendor_cmd;
4891 
4892  menlo_resp = (struct menlo_response *)
4893  job->reply->reply_data.vendor_reply.vendor_rsp;
4894 
4895  /* allocate our bsg tracking structure */
4896  dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4897  if (!dd_data) {
4899  "2787 Failed allocation of dd_data\n");
4900  rc = -ENOMEM;
4901  goto no_dd_data;
4902  }
4903 
4904  bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4905  if (!bmp) {
4906  rc = -ENOMEM;
4907  goto free_dd;
4908  }
4909 
4910  cmdiocbq = lpfc_sli_get_iocbq(phba);
4911  if (!cmdiocbq) {
4912  rc = -ENOMEM;
4913  goto free_bmp;
4914  }
4915 
4916  rspiocbq = lpfc_sli_get_iocbq(phba);
4917  if (!rspiocbq) {
4918  rc = -ENOMEM;
4919  goto free_cmdiocbq;
4920  }
4921 
4922  rsp = &rspiocbq->iocb;
4923 
4924  bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4925  if (!bmp->virt) {
4926  rc = -ENOMEM;
4927  goto free_rspiocbq;
4928  }
4929 
4930  INIT_LIST_HEAD(&bmp->list);
4931  bpl = (struct ulp_bde64 *) bmp->virt;
4932  request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
4933  job->request_payload.sg_cnt, DMA_TO_DEVICE);
4934  for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
4935  busaddr = sg_dma_address(sgel);
4936  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
4937  bpl->tus.f.bdeSize = sg_dma_len(sgel);
4938  bpl->tus.w = cpu_to_le32(bpl->tus.w);
4939  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4940  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4941  bpl++;
4942  }
4943 
4944  reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
4945  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4946  for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
4947  busaddr = sg_dma_address(sgel);
4948  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4949  bpl->tus.f.bdeSize = sg_dma_len(sgel);
4950  bpl->tus.w = cpu_to_le32(bpl->tus.w);
4951  bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4952  bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4953  bpl++;
4954  }
4955 
4956  cmd = &cmdiocbq->iocb;
4957  cmd->un.genreq64.bdl.ulpIoTag32 = 0;
4958  cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
4959  cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
4960  cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
4961  cmd->un.genreq64.bdl.bdeSize =
4962  (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
4963  cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
4964  cmd->un.genreq64.w5.hcsw.Dfctl = 0;
4965  cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
4966  cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
4967  cmd->ulpBdeCount = 1;
4968  cmd->ulpClass = CLASS3;
4969  cmd->ulpOwner = OWN_CHIP;
4970  cmd->ulpLe = 1; /* Limited Edition */
4971  cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
4972  cmdiocbq->vport = phba->pport;
4973  /* We want the firmware to timeout before we do */
4974  cmd->ulpTimeout = MENLO_TIMEOUT - 5;
4975  cmdiocbq->context3 = bmp;
4976  cmdiocbq->context2 = rspiocbq;
4977  cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
4978  cmdiocbq->context1 = dd_data;
4979  cmdiocbq->context2 = rspiocbq;
4980  if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
4981  cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
4982  cmd->ulpPU = MENLO_PU; /* 3 */
4983  cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
4984  cmd->ulpContext = MENLO_CONTEXT; /* 0 */
4985  } else {
4986  cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
4987  cmd->ulpPU = 1;
4988  cmd->un.ulpWord[4] = 0;
4989  cmd->ulpContext = menlo_cmd->xri;
4990  }
4991 
4992  dd_data->type = TYPE_MENLO;
4993  dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
4994  dd_data->context_un.menlo.rspiocbq = rspiocbq;
4995  dd_data->context_un.menlo.set_job = job;
4996  dd_data->context_un.menlo.bmp = bmp;
4997 
4998  rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
4999  MENLO_TIMEOUT - 5);
5000  if (rc == IOCB_SUCCESS)
5001  return 0; /* done for now */
5002 
5003  /* iocb failed so cleanup */
5004  pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
5005  job->request_payload.sg_cnt, DMA_TO_DEVICE);
5006  pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
5007  job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
5008 
5009  lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5010 
5011 free_rspiocbq:
5012  lpfc_sli_release_iocbq(phba, rspiocbq);
5013 free_cmdiocbq:
5014  lpfc_sli_release_iocbq(phba, cmdiocbq);
5015 free_bmp:
5016  kfree(bmp);
5017 free_dd:
5018  kfree(dd_data);
5019 no_dd_data:
5020  /* make error code available to userspace */
5021  job->reply->result = rc;
5022  job->dd_data = NULL;
5023  return rc;
5024 }
5025 
5030 static int
5031 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
5032 {
5033  int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
5034  int rc;
5035 
5036  switch (command) {
5038  rc = lpfc_bsg_hba_set_event(job);
5039  break;
5041  rc = lpfc_bsg_hba_get_event(job);
5042  break;
5044  rc = lpfc_bsg_send_mgmt_rsp(job);
5045  break;
5047  rc = lpfc_bsg_diag_loopback_mode(job);
5048  break;
5050  rc = lpfc_sli4_bsg_diag_mode_end(job);
5051  break;
5053  rc = lpfc_bsg_diag_loopback_run(job);
5054  break;
5056  rc = lpfc_sli4_bsg_link_diag_test(job);
5057  break;
5059  rc = lpfc_bsg_get_dfc_rev(job);
5060  break;
5061  case LPFC_BSG_VENDOR_MBOX:
5062  rc = lpfc_bsg_mbox_cmd(job);
5063  break;
5066  rc = lpfc_menlo_cmd(job);
5067  break;
5068  default:
5069  rc = -EINVAL;
5070  job->reply->reply_payload_rcv_len = 0;
5071  /* make error code available to userspace */
5072  job->reply->result = rc;
5073  break;
5074  }
5075 
5076  return rc;
5077 }
5078 
5083 int
5085 {
5086  uint32_t msgcode;
5087  int rc;
5088 
5089  msgcode = job->request->msgcode;
5090  switch (msgcode) {
5091  case FC_BSG_HST_VENDOR:
5092  rc = lpfc_bsg_hst_vendor(job);
5093  break;
5094  case FC_BSG_RPT_ELS:
5095  rc = lpfc_bsg_rport_els(job);
5096  break;
5097  case FC_BSG_RPT_CT:
5098  rc = lpfc_bsg_send_mgmt_cmd(job);
5099  break;
5100  default:
5101  rc = -EINVAL;
5102  job->reply->reply_payload_rcv_len = 0;
5103  /* make error code available to userspace */
5104  job->reply->result = rc;
5105  break;
5106  }
5107 
5108  return rc;
5109 }
5110 
5118 int
5120 {
5121  struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5122  struct lpfc_hba *phba = vport->phba;
5123  struct lpfc_iocbq *cmdiocb;
5124  struct lpfc_bsg_event *evt;
5125  struct lpfc_bsg_iocb *iocb;
5126  struct lpfc_bsg_mbox *mbox;
5127  struct lpfc_bsg_menlo *menlo;
5128  struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5129  struct bsg_job_data *dd_data;
5130  unsigned long flags;
5131 
5132  spin_lock_irqsave(&phba->ct_ev_lock, flags);
5133  dd_data = (struct bsg_job_data *)job->dd_data;
5134  /* timeout and completion crossed paths if no dd_data */
5135  if (!dd_data) {
5136  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5137  return 0;
5138  }
5139 
5140  switch (dd_data->type) {
5141  case TYPE_IOCB:
5142  iocb = &dd_data->context_un.iocb;
5143  cmdiocb = iocb->cmdiocbq;
5144  /* hint to completion handler that the job timed out */
5145  job->reply->result = -EAGAIN;
5146  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5147  /* this will call our completion handler */
5148  spin_lock_irq(&phba->hbalock);
5149  lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5150  spin_unlock_irq(&phba->hbalock);
5151  break;
5152  case TYPE_EVT:
5153  evt = dd_data->context_un.evt;
5154  /* this event has no job anymore */
5155  evt->set_job = NULL;
5156  job->dd_data = NULL;
5157  job->reply->reply_payload_rcv_len = 0;
5158  /* Return -EAGAIN which is our way of signallying the
5159  * app to retry.
5160  */
5161  job->reply->result = -EAGAIN;
5162  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5163  job->job_done(job);
5164  break;
5165  case TYPE_MBOX:
5166  mbox = &dd_data->context_un.mbox;
5167  /* this mbox has no job anymore */
5168  mbox->set_job = NULL;
5169  job->dd_data = NULL;
5170  job->reply->reply_payload_rcv_len = 0;
5171  job->reply->result = -EAGAIN;
5172  /* the mbox completion handler can now be run */
5173  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5174  job->job_done(job);
5175  if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5176  phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5177  break;
5178  case TYPE_MENLO:
5179  menlo = &dd_data->context_un.menlo;
5180  cmdiocb = menlo->cmdiocbq;
5181  /* hint to completion handler that the job timed out */
5182  job->reply->result = -EAGAIN;
5183  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5184  /* this will call our completion handler */
5185  spin_lock_irq(&phba->hbalock);
5186  lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5187  spin_unlock_irq(&phba->hbalock);
5188  break;
5189  default:
5190  spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5191  break;
5192  }
5193 
5194  /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5195  * otherwise an error message will be displayed on the console
5196  * so always return success (zero)
5197  */
5198  return 0;
5199 }