Linux Kernel  3.7.1
lpfc_scsi.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for *
3  * Fibre Channel Host Bus Adapters. *
4  * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8  * *
9  * This program is free software; you can redistribute it and/or *
10  * modify it under the terms of version 2 of the GNU General *
11  * Public License as published by the Free Software Foundation. *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID. See the GNU General Public License for *
18  * more details, a copy of which can be found in the file COPYING *
19  * included with this package. *
20  *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <asm/unaligned.h>
27 
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_eh.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_tcq.h>
33 #include <scsi/scsi_transport_fc.h>
34 
35 #include "lpfc_version.h"
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_vport.h"
47 
48 #define LPFC_RESET_WAIT 2
49 #define LPFC_ABORT_WAIT 2
50 
52 
53 static char *dif_op_str[] = {
54  "PROT_NORMAL",
55  "PROT_READ_INSERT",
56  "PROT_WRITE_STRIP",
57  "PROT_READ_STRIP",
58  "PROT_WRITE_INSERT",
59  "PROT_READ_PASS",
60  "PROT_WRITE_PASS",
61 };
62 
63 struct scsi_dif_tuple {
64  __be16 guard_tag; /* Checksum */
65  __be16 app_tag; /* Opaque storage */
66  __be32 ref_tag; /* Target LBA or indirect LBA */
67 };
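/*
 * Note on the structure above: it mirrors the 8-byte T10 DIF tuple
 * (2-byte guard, 2-byte application tag, 4-byte reference tag), one
 * tuple per logical block. The protection-group arithmetic later in
 * this file (the "% 8" and "/ 8" in lpfc_bg_setup_bpl_prot, and the
 * sizeof(struct scsi_dif_tuple) division in lpfc_bg_err_inject)
 * depends on this 8-byte size.
 */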
68 
69 static void
70 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
71 static void
72 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
73 
74 static void
75 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
76 {
77  void *src, *dst;
78  struct scatterlist *sgde = scsi_sglist(cmnd);
79 
80  if (!_dump_buf_data) {
81  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
82  "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
83  __func__);
84  return;
85  }
86 
87 
88  if (!sgde) {
89  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
90  "9051 BLKGRD: ERROR: data scatterlist is null\n");
91  return;
92  }
93 
94  dst = (void *) _dump_buf_data;
95  while (sgde) {
96  src = sg_virt(sgde);
97  memcpy(dst, src, sgde->length);
98  dst += sgde->length;
99  sgde = sg_next(sgde);
100  }
101 }
102 
103 static void
104 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
105 {
106  void *src, *dst;
107  struct scatterlist *sgde = scsi_prot_sglist(cmnd);
108 
109  if (!_dump_buf_dif) {
110  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
111  "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
112  __func__);
113  return;
114  }
115 
116  if (!sgde) {
117  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
118  "9053 BLKGRD: ERROR: prot scatterlist is null\n");
119  return;
120  }
121 
122  dst = _dump_buf_dif;
123  while (sgde) {
124  src = sg_virt(sgde);
125  memcpy(dst, src, sgde->length);
126  dst += sgde->length;
127  sgde = sg_next(sgde);
128  }
129 }
130 
139 static void
140 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
141  struct lpfc_scsi_buf *lpfc_cmd)
142 {
143  struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
144  if (sgl) {
145  sgl += 1;
146  sgl->word2 = le32_to_cpu(sgl->word2);
147  bf_set(lpfc_sli4_sge_last, sgl, 1);
148  sgl->word2 = cpu_to_le32(sgl->word2);
149  }
150 }
151 
160 static void
161 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
162 {
163  struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
164  struct lpfc_nodelist *pnode = rdata->pnode;
165  struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
166  unsigned long flags;
167  struct Scsi_Host *shost = cmd->device->host;
168  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
169  unsigned long latency;
170  int i;
171 
172  if (cmd->result)
173  return;
174 
175  latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
176 
177  spin_lock_irqsave(shost->host_lock, flags);
178  if (!vport->stat_data_enabled ||
179  vport->stat_data_blocked ||
180  !pnode ||
181  !pnode->lat_data ||
182  (phba->bucket_type == LPFC_NO_BUCKET)) {
183  spin_unlock_irqrestore(shost->host_lock, flags);
184  return;
185  }
186 
187  if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
188  i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
189  phba->bucket_step;
190  /* check array subscript bounds */
191  if (i < 0)
192  i = 0;
193  else if (i >= LPFC_MAX_BUCKET_COUNT)
194  i = LPFC_MAX_BUCKET_COUNT - 1;
195  } else {
196  for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
197  if (latency <= (phba->bucket_base +
198  ((1<<i)*phba->bucket_step)))
199  break;
200  }
201 
202  pnode->lat_data[i].cmd_count++;
203  spin_unlock_irqrestore(shost->host_lock, flags);
204 }
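/*
 * Worked example for the bucket selection above (illustrative values
 * only): with LPFC_LINEAR_BUCKET, bucket_base = 0, bucket_step = 10
 * and a measured latency of 37 ms, i = (37 + 10 - 1 - 0) / 10 = 4.
 * With the power-of-two bucketing in the else branch, the same latency
 * picks the first i for which 37 <= 0 + (1 << i) * 10, i.e. i = 2.
 */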
205 
218 static void
219 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
220  struct lpfc_vport *vport,
221  struct lpfc_nodelist *ndlp,
222  uint32_t lun,
223  uint32_t old_val,
224  uint32_t new_val)
225 {
226  struct lpfc_fast_path_event *fast_path_evt;
227  unsigned long flags;
228 
229  fast_path_evt = lpfc_alloc_fast_evt(phba);
230  if (!fast_path_evt)
231  return;
232 
233  fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
234  FC_REG_SCSI_EVENT;
235  fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
236  LPFC_EVENT_VARQUEDEPTH;
237 
238  /* Report all luns with change in queue depth */
239  fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
240  if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
241  memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
242  &ndlp->nlp_portname, sizeof(struct lpfc_name));
243  memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
244  &ndlp->nlp_nodename, sizeof(struct lpfc_name));
245  }
246 
247  fast_path_evt->un.queue_depth_evt.oldval = old_val;
248  fast_path_evt->un.queue_depth_evt.newval = new_val;
249  fast_path_evt->vport = vport;
250 
251  fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
252  spin_lock_irqsave(&phba->hbalock, flags);
253  list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
254  spin_unlock_irqrestore(&phba->hbalock, flags);
255  lpfc_worker_wake_up(phba);
256 
257  return;
258 }
259 
270 int
271 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
272 {
273  struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
274  struct lpfc_hba *phba = vport->phba;
275  struct lpfc_rport_data *rdata;
276  unsigned long new_queue_depth, old_queue_depth;
277 
278  old_queue_depth = sdev->queue_depth;
279  scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
280  new_queue_depth = sdev->queue_depth;
281  rdata = sdev->hostdata;
282  if (rdata)
283  lpfc_send_sdev_queuedepth_change_event(phba, vport,
284  rdata->pnode, sdev->lun,
285  old_queue_depth,
286  new_queue_depth);
287  return sdev->queue_depth;
288 }
289 
301 void
302 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
303 {
304  unsigned long flags;
305  uint32_t evt_posted;
306 
307  spin_lock_irqsave(&phba->hbalock, flags);
308  atomic_inc(&phba->num_rsrc_err);
309  phba->last_rsrc_error_time = jiffies;
310 
311  if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
312  spin_unlock_irqrestore(&phba->hbalock, flags);
313  return;
314  }
315 
316  phba->last_ramp_down_time = jiffies;
317 
318  spin_unlock_irqrestore(&phba->hbalock, flags);
319 
320  spin_lock_irqsave(&phba->pport->work_port_lock, flags);
321  evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
322  if (!evt_posted)
323  phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
324  spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
325 
326  if (!evt_posted)
327  lpfc_worker_wake_up(phba);
328  return;
329 }
330 
342 static inline void
343 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
344  uint32_t queue_depth)
345 {
346  unsigned long flags;
347  struct lpfc_hba *phba = vport->phba;
348  uint32_t evt_posted;
349  atomic_inc(&phba->num_cmd_success);
350 
351  if (vport->cfg_lun_queue_depth <= queue_depth)
352  return;
353  spin_lock_irqsave(&phba->hbalock, flags);
354  if (time_before(jiffies,
355  phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
356  time_before(jiffies,
357  phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
358  spin_unlock_irqrestore(&phba->hbalock, flags);
359  return;
360  }
361  phba->last_ramp_up_time = jiffies;
362  spin_unlock_irqrestore(&phba->hbalock, flags);
363 
364  spin_lock_irqsave(&phba->pport->work_port_lock, flags);
365  evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
366  if (!evt_posted)
367  phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
368  spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
369 
370  if (!evt_posted)
371  lpfc_worker_wake_up(phba);
372  return;
373 }
374 
383 void
384 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
385 {
386  struct lpfc_vport **vports;
387  struct Scsi_Host *shost;
388  struct scsi_device *sdev;
389  unsigned long new_queue_depth;
390  unsigned long num_rsrc_err, num_cmd_success;
391  int i;
392 
393  num_rsrc_err = atomic_read(&phba->num_rsrc_err);
394  num_cmd_success = atomic_read(&phba->num_cmd_success);
395 
396  /*
397  * The error and success command counters are global per
398  * driver instance. If another handler has already
399  * operated on this error event, just exit.
400  */
401  if (num_rsrc_err == 0)
402  return;
403 
404  vports = lpfc_create_vport_work_array(phba);
405  if (vports != NULL)
406  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
407  shost = lpfc_shost_from_vport(vports[i]);
408  shost_for_each_device(sdev, shost) {
409  new_queue_depth =
410  sdev->queue_depth * num_rsrc_err /
411  (num_rsrc_err + num_cmd_success);
412  if (!new_queue_depth)
413  new_queue_depth = sdev->queue_depth - 1;
414  else
415  new_queue_depth = sdev->queue_depth -
416  new_queue_depth;
417  lpfc_change_queue_depth(sdev, new_queue_depth,
418  SCSI_QDEPTH_DEFAULT);
419  }
420  }
421  lpfc_destroy_vport_work_array(phba, vports);
422  atomic_set(&phba->num_rsrc_err, 0);
423  atomic_set(&phba->num_cmd_success, 0);
424 }
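/*
 * Worked example for the ramp-down computation above (illustrative
 * values only): with sdev->queue_depth = 32, num_rsrc_err = 3 and
 * num_cmd_success = 13, new_queue_depth = 32 * 3 / 16 = 6, so the
 * device is dropped to a depth of 32 - 6 = 26. When the scaled value
 * rounds down to zero, the depth is simply reduced by one instead.
 */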
425 
435 void
436 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
437 {
438  struct lpfc_vport **vports;
439  struct Scsi_Host *shost;
440  struct scsi_device *sdev;
441  int i;
442 
443  vports = lpfc_create_vport_work_array(phba);
444  if (vports != NULL)
445  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
446  shost = lpfc_shost_from_vport(vports[i]);
447  shost_for_each_device(sdev, shost) {
448  if (vports[i]->cfg_lun_queue_depth <=
449  sdev->queue_depth)
450  continue;
451  lpfc_change_queue_depth(sdev,
452  sdev->queue_depth+1,
453  SCSI_QDEPTH_RAMP_UP);
454  }
455  }
456  lpfc_destroy_vport_work_array(phba, vports);
457  atomic_set(&phba->num_rsrc_err, 0);
458  atomic_set(&phba->num_cmd_success, 0);
459 }
460 
469 void
470 lpfc_scsi_dev_block(struct lpfc_hba *phba)
471 {
472  struct lpfc_vport **vports;
473  struct Scsi_Host *shost;
474  struct scsi_device *sdev;
475  struct fc_rport *rport;
476  int i;
477 
478  vports = lpfc_create_vport_work_array(phba);
479  if (vports != NULL)
480  for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
481  shost = lpfc_shost_from_vport(vports[i]);
482  shost_for_each_device(sdev, shost) {
483  rport = starget_to_rport(scsi_target(sdev));
484  fc_remote_port_delete(rport);
485  }
486  }
487  lpfc_destroy_vport_work_array(phba, vports);
488 }
489 
506 static int
507 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
508 {
509  struct lpfc_hba *phba = vport->phba;
510  struct lpfc_scsi_buf *psb;
511  struct ulp_bde64 *bpl;
512  IOCB_t *iocb;
513  dma_addr_t pdma_phys_fcp_cmd;
514  dma_addr_t pdma_phys_fcp_rsp;
515  dma_addr_t pdma_phys_bpl;
516  uint16_t iotag;
517  int bcnt;
518 
519  for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
520  psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
521  if (!psb)
522  break;
523 
524  /*
525  * Get memory from the pci pool to map the virt space to pci
526  * bus space for an I/O. The DMA buffer includes space for the
527  * struct fcp_cmnd, struct fcp_rsp and the number of bde's
528  * necessary to support the sg_tablesize.
529  */
530  psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
531  GFP_KERNEL, &psb->dma_handle);
532  if (!psb->data) {
533  kfree(psb);
534  break;
535  }
536 
537  /* Initialize virtual ptrs to dma_buf region. */
538  memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
539 
540  /* Allocate iotag for psb->cur_iocbq. */
541  iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
542  if (iotag == 0) {
543  pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
544  psb->data, psb->dma_handle);
545  kfree(psb);
546  break;
547  }
548  psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
549 
550  psb->fcp_cmnd = psb->data;
551  psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
552  psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
553  sizeof(struct fcp_rsp);
554 
555  /* Initialize local short-hand pointers. */
556  bpl = psb->fcp_bpl;
557  pdma_phys_fcp_cmd = psb->dma_handle;
558  pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
559  pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
560  sizeof(struct fcp_rsp);
561 
562  /*
563  * The first two bdes are the FCP_CMD and FCP_RSP. The balance
564  * are sg list bdes. Initialize the first two and leave the
565  * rest for queuecommand.
566  */
567  bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
568  bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
569  bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
570  bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
571  bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
572 
573  /* Setup the physical region for the FCP RSP */
574  bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
575  bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
576  bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
577  bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
578  bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
579 
580  /*
581  * Since the IOCB for the FCP I/O is built into this
582  * lpfc_scsi_buf, initialize it with all known data now.
583  */
584  iocb = &psb->cur_iocbq.iocb;
585  iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
586  if ((phba->sli_rev == 3) &&
587  !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
588  /* fill in immediate fcp command BDE */
589  iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
590  iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
591  iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
592  unsli3.fcp_ext.icd);
593  iocb->un.fcpi64.bdl.addrHigh = 0;
594  iocb->ulpBdeCount = 0;
595  iocb->ulpLe = 0;
596  /* fill in response BDE */
597  iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
598  BUFF_TYPE_BDE_64;
599  iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
600  sizeof(struct fcp_rsp);
601  iocb->unsli3.fcp_ext.rbde.addrLow =
602  putPaddrLow(pdma_phys_fcp_rsp);
603  iocb->unsli3.fcp_ext.rbde.addrHigh =
604  putPaddrHigh(pdma_phys_fcp_rsp);
605  } else {
606  iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
607  iocb->un.fcpi64.bdl.bdeSize =
608  (2 * sizeof(struct ulp_bde64));
609  iocb->un.fcpi64.bdl.addrLow =
610  putPaddrLow(pdma_phys_bpl);
611  iocb->un.fcpi64.bdl.addrHigh =
612  putPaddrHigh(pdma_phys_bpl);
613  iocb->ulpBdeCount = 1;
614  iocb->ulpLe = 1;
615  }
616  iocb->ulpClass = CLASS3;
617  psb->status = IOSTAT_SUCCESS;
618  /* Put it back into the SCSI buffer list */
619  psb->cur_iocbq.context1 = psb;
620  lpfc_release_scsi_buf_s3(phba, psb);
621 
622  }
623 
624  return bcnt;
625 }
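/*
 * Layout sketch of the per-command DMA buffer built above (inferred
 * from the pointer arithmetic on psb->data): the fcp_cmnd sits at the
 * start, the fcp_rsp immediately follows it, and the BPL - bpl[0] and
 * bpl[1] for the command/response BDEs plus the data BDEs filled in
 * later by queuecommand - occupies the rest of cfg_sg_dma_buf_size.
 */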
626 
634 void
635 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
636 {
637  struct lpfc_hba *phba = vport->phba;
638  struct lpfc_scsi_buf *psb, *next_psb;
639  unsigned long iflag = 0;
640 
641  spin_lock_irqsave(&phba->hbalock, iflag);
642  spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
643  list_for_each_entry_safe(psb, next_psb,
644  &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
645  if (psb->rdata && psb->rdata->pnode
646  && psb->rdata->pnode->vport == vport)
647  psb->rdata = NULL;
648  }
649  spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
650  spin_unlock_irqrestore(&phba->hbalock, iflag);
651 }
652 
661 void
662 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
663  struct sli4_wcqe_xri_aborted *axri)
664 {
665  uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
666  uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
667  struct lpfc_scsi_buf *psb, *next_psb;
668  unsigned long iflag = 0;
669  struct lpfc_iocbq *iocbq;
670  int i;
671  struct lpfc_nodelist *ndlp;
672  int rrq_empty = 0;
673  struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
674 
675  spin_lock_irqsave(&phba->hbalock, iflag);
676  spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
677  list_for_each_entry_safe(psb, next_psb,
678  &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
679  if (psb->cur_iocbq.sli4_xritag == xri) {
680  list_del(&psb->list);
681  psb->exch_busy = 0;
682  psb->status = IOSTAT_SUCCESS;
683  spin_unlock(
684  &phba->sli4_hba.abts_scsi_buf_list_lock);
685  if (psb->rdata && psb->rdata->pnode)
686  ndlp = psb->rdata->pnode;
687  else
688  ndlp = NULL;
689 
690  rrq_empty = list_empty(&phba->active_rrq_list);
691  spin_unlock_irqrestore(&phba->hbalock, iflag);
692  if (ndlp) {
693  lpfc_set_rrq_active(phba, ndlp,
694  psb->cur_iocbq.sli4_lxritag, rxid, 1);
695  lpfc_sli4_abts_err_handler(phba, ndlp, axri);
696  }
697  lpfc_release_scsi_buf_s4(phba, psb);
698  if (rrq_empty)
699  lpfc_worker_wake_up(phba);
700  return;
701  }
702  }
703  spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
704  for (i = 1; i <= phba->sli.last_iotag; i++) {
705  iocbq = phba->sli.iocbq_lookup[i];
706 
707  if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
708  (iocbq->iocb_flag & LPFC_IO_LIBDFC))
709  continue;
710  if (iocbq->sli4_xritag != xri)
711  continue;
712  psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
713  psb->exch_busy = 0;
714  spin_unlock_irqrestore(&phba->hbalock, iflag);
715  if (pring->txq_cnt)
716  lpfc_worker_wake_up(phba);
717  return;
718 
719  }
720  spin_unlock_irqrestore(&phba->hbalock, iflag);
721 }
722 
737 int
738 lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
739  struct list_head *post_sblist, int sb_count)
740 {
741  struct lpfc_scsi_buf *psb, *psb_next;
742  int status;
743  int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
744  dma_addr_t pdma_phys_bpl1;
745  int last_xritag = NO_XRI;
746  LIST_HEAD(prep_sblist);
747  LIST_HEAD(blck_sblist);
748  LIST_HEAD(scsi_sblist);
749 
750  /* sanity check */
751  if (sb_count <= 0)
752  return -EINVAL;
753 
754  list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
755  list_del_init(&psb->list);
756  block_cnt++;
757  if ((last_xritag != NO_XRI) &&
758  (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
759  /* a hole in xri block, form a sgl posting block */
760  list_splice_init(&prep_sblist, &blck_sblist);
761  post_cnt = block_cnt - 1;
762  /* prepare list for next posting block */
763  list_add_tail(&psb->list, &prep_sblist);
764  block_cnt = 1;
765  } else {
766  /* prepare list for next posting block */
767  list_add_tail(&psb->list, &prep_sblist);
768  /* enough sgls for non-embed sgl mbox command */
769  if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
770  list_splice_init(&prep_sblist, &blck_sblist);
771  post_cnt = block_cnt;
772  block_cnt = 0;
773  }
774  }
775  num_posting++;
776  last_xritag = psb->cur_iocbq.sli4_xritag;
777 
778  /* end of repost sgl list condition for SCSI buffers */
779  if (num_posting == sb_count) {
780  if (post_cnt == 0) {
781  /* last sgl posting block */
782  list_splice_init(&prep_sblist, &blck_sblist);
783  post_cnt = block_cnt;
784  } else if (block_cnt == 1) {
785  /* last single sgl with non-contiguous xri */
786  if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
787  pdma_phys_bpl1 = psb->dma_phys_bpl +
788  SGL_PAGE_SIZE;
789  else
790  pdma_phys_bpl1 = 0;
791  status = lpfc_sli4_post_sgl(phba,
792  psb->dma_phys_bpl,
793  pdma_phys_bpl1,
794  psb->cur_iocbq.sli4_xritag);
795  if (status) {
796  /* failure, put on abort scsi list */
797  psb->exch_busy = 1;
798  } else {
799  /* success, put on SCSI buffer list */
800  psb->exch_busy = 0;
801  psb->status = IOSTAT_SUCCESS;
802  num_posted++;
803  }
804  /* success, put on SCSI buffer sgl list */
805  list_add_tail(&psb->list, &scsi_sblist);
806  }
807  }
808 
809  /* continue until a nembed page worth of sgls */
810  if (post_cnt == 0)
811  continue;
812 
813  /* post block of SCSI buffer list sgls */
814  status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
815  post_cnt);
816 
817  /* don't reset xritag due to hole in xri block */
818  if (block_cnt == 0)
819  last_xritag = NO_XRI;
820 
821  /* reset SCSI buffer post count for next round of posting */
822  post_cnt = 0;
823 
824  /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
825  while (!list_empty(&blck_sblist)) {
826  list_remove_head(&blck_sblist, psb,
827  struct lpfc_scsi_buf, list);
828  if (status) {
829  /* failure, put on abort scsi list */
830  psb->exch_busy = 1;
831  } else {
832  /* success, put on SCSI buffer list */
833  psb->exch_busy = 0;
834  psb->status = IOSTAT_SUCCESS;
835  num_posted++;
836  }
837  list_add_tail(&psb->list, &scsi_sblist);
838  }
839  }
840  /* Push SCSI buffers with sgl posted to the available list */
841  while (!list_empty(&scsi_sblist)) {
842  list_remove_head(&scsi_sblist, psb,
843  struct lpfc_scsi_buf, list);
844  lpfc_release_scsi_buf_s4(phba, psb);
845  }
846  return num_posted;
847 }
848 
861 int
862 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
863 {
864  LIST_HEAD(post_sblist);
865  int num_posted, rc = 0;
866 
867  /* get all SCSI buffers that need to be reposted onto a local list */
868  spin_lock(&phba->scsi_buf_list_lock);
869  list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
870  spin_unlock(&phba->scsi_buf_list_lock);
871 
872  /* post the list of scsi buffer sgls to port if available */
873  if (!list_empty(&post_sblist)) {
874  num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
875  phba->sli4_hba.scsi_xri_cnt);
876  /* failed to post any scsi buffer, return error */
877  if (num_posted == 0)
878  rc = -EIO;
879  }
880  return rc;
881 }
882 
897 static int
898 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
899 {
900  struct lpfc_hba *phba = vport->phba;
901  struct lpfc_scsi_buf *psb;
902  struct sli4_sge *sgl;
903  IOCB_t *iocb;
904  dma_addr_t pdma_phys_fcp_cmd;
905  dma_addr_t pdma_phys_fcp_rsp;
906  dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
907  uint16_t iotag, lxri = 0;
908  int bcnt, num_posted;
909  LIST_HEAD(prep_sblist);
910  LIST_HEAD(post_sblist);
911  LIST_HEAD(scsi_sblist);
912 
913  for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
914  psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
915  if (!psb)
916  break;
917  /*
918  * Get memory from the pci pool to map the virt space to
919  * pci bus space for an I/O. The DMA buffer includes space
920  * for the struct fcp_cmnd, struct fcp_rsp and the number
921  * of bde's necessary to support the sg_tablesize.
922  */
923  psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
924  GFP_KERNEL, &psb->dma_handle);
925  if (!psb->data) {
926  kfree(psb);
927  break;
928  }
929  memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
930 
931  /* Allocate iotag for psb->cur_iocbq. */
932  iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
933  if (iotag == 0) {
934  pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
935  psb->data, psb->dma_handle);
936  kfree(psb);
937  break;
938  }
939 
940  lxri = lpfc_sli4_next_xritag(phba);
941  if (lxri == NO_XRI) {
942  pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
943  psb->data, psb->dma_handle);
944  kfree(psb);
945  break;
946  }
947  psb->cur_iocbq.sli4_lxritag = lxri;
948  psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
949  psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
950  psb->fcp_bpl = psb->data;
951  psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
952  - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
953  psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
954  sizeof(struct fcp_cmnd));
955 
956  /* Initialize local short-hand pointers. */
957  sgl = (struct sli4_sge *)psb->fcp_bpl;
958  pdma_phys_bpl = psb->dma_handle;
959  pdma_phys_fcp_cmd =
960  (psb->dma_handle + phba->cfg_sg_dma_buf_size)
961  - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
962  pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
963 
964  /*
965  * The first two bdes are the FCP_CMD and FCP_RSP.
966  * The balance are sg list bdes. Initialize the
967  * first two and leave the rest for queuecommand.
968  */
969  sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
970  sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
971  sgl->word2 = le32_to_cpu(sgl->word2);
972  bf_set(lpfc_sli4_sge_last, sgl, 0);
973  sgl->word2 = cpu_to_le32(sgl->word2);
974  sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
975  sgl++;
976 
977  /* Setup the physical region for the FCP RSP */
978  sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
979  sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
980  sgl->word2 = le32_to_cpu(sgl->word2);
981  bf_set(lpfc_sli4_sge_last, sgl, 1);
982  sgl->word2 = cpu_to_le32(sgl->word2);
983  sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
984 
985  /*
986  * Since the IOCB for the FCP I/O is built into this
987  * lpfc_scsi_buf, initialize it with all known data now.
988  */
989  iocb = &psb->cur_iocbq.iocb;
990  iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
991  iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
992  /* setting the BLP size to 2 * sizeof BDE may not be correct.
993  * We are setting the bpl to point to our sgl. An sgl's
994  * entries are 16 bytes, a bpl's entries are 12 bytes.
995  */
996  iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
997  iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
998  iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
999  iocb->ulpBdeCount = 1;
1000  iocb->ulpLe = 1;
1001  iocb->ulpClass = CLASS3;
1002  psb->cur_iocbq.context1 = psb;
1003  if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1004  pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
1005  else
1006  pdma_phys_bpl1 = 0;
1007  psb->dma_phys_bpl = pdma_phys_bpl;
1008 
1009  /* add the scsi buffer to a post list */
1010  list_add_tail(&psb->list, &post_sblist);
1011  spin_lock_irq(&phba->scsi_buf_list_lock);
1012  phba->sli4_hba.scsi_xri_cnt++;
1013  spin_unlock_irq(&phba->scsi_buf_list_lock);
1014  }
1015  lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1016  "3021 Allocate %d out of %d requested new SCSI "
1017  "buffers\n", bcnt, num_to_alloc);
1018 
1019  /* post the list of scsi buffer sgls to port if available */
1020  if (!list_empty(&post_sblist))
1021  num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
1022  &post_sblist, bcnt);
1023  else
1024  num_posted = 0;
1025 
1026  return num_posted;
1027 }
1028 
1041 static inline int
1042 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
1043 {
1044  return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
1045 }
1046 
1058 static struct lpfc_scsi_buf*
1059 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1060 {
1061  struct lpfc_scsi_buf * lpfc_cmd = NULL;
1062  struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
1063  unsigned long iflag = 0;
1064 
1065  spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1066  list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
1067  if (lpfc_cmd) {
1068  lpfc_cmd->seg_cnt = 0;
1069  lpfc_cmd->nonsg_phys = 0;
1070  lpfc_cmd->prot_seg_cnt = 0;
1071  }
1072  spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
1073  return lpfc_cmd;
1074 }
1086 static struct lpfc_scsi_buf*
1087 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1088 {
1089  struct lpfc_scsi_buf *lpfc_cmd ;
1090  unsigned long iflag = 0;
1091  int found = 0;
1092 
1093  spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1094  list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
1095  list) {
1096  if (lpfc_test_rrq_active(phba, ndlp,
1097  lpfc_cmd->cur_iocbq.sli4_lxritag))
1098  continue;
1099  list_del(&lpfc_cmd->list);
1100  found = 1;
1101  lpfc_cmd->seg_cnt = 0;
1102  lpfc_cmd->nonsg_phys = 0;
1103  lpfc_cmd->prot_seg_cnt = 0;
1104  break;
1105  }
1106  spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
1107  iflag);
1108  if (!found)
1109  return NULL;
1110  else
1111  return lpfc_cmd;
1112 }
1124 static struct lpfc_scsi_buf*
1125 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1126 {
1127  return phba->lpfc_get_scsi_buf(phba, ndlp);
1128 }
1129 
1138 static void
1139 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1140 {
1141  unsigned long iflag = 0;
1142 
1143  spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1144  psb->pCmd = NULL;
1145  list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
1146  spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
1147 }
1148 
1159 static void
1160 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1161 {
1162  unsigned long iflag = 0;
1163 
1164  if (psb->exch_busy) {
1165  spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1166  iflag);
1167  psb->pCmd = NULL;
1168  list_add_tail(&psb->list,
1169  &phba->sli4_hba.lpfc_abts_scsi_buf_list);
1170  spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1171  iflag);
1172  } else {
1173 
1174  spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1175  psb->pCmd = NULL;
1176  list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
1177  spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
1178  }
1179 }
1180 
1189 static void
1190 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1191 {
1192 
1193  phba->lpfc_release_scsi_buf(phba, psb);
1194 }
1195 
1210 static int
1211 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1212 {
1213  struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1214  struct scatterlist *sgel = NULL;
1215  struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1216  struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1217  struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1218  IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1219  struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1220  dma_addr_t physaddr;
1221  uint32_t num_bde = 0;
1222  int nseg, datadir = scsi_cmnd->sc_data_direction;
1223 
1224  /*
1225  * There are three possibilities here - use scatter-gather segment, use
1226  * the single mapping, or neither. Start the lpfc command prep by
1227  * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1228  * data bde entry.
1229  */
1230  bpl += 2;
1231  if (scsi_sg_count(scsi_cmnd)) {
1232  /*
1233  * The driver stores the segment count returned from pci_map_sg
1234  because this is a count of dma-mappings used to map the use_sg
1235  * pages. They are not guaranteed to be the same for those
1236  * architectures that implement an IOMMU.
1237  */
1238 
1239  nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1240  scsi_sg_count(scsi_cmnd), datadir);
1241  if (unlikely(!nseg))
1242  return 1;
1243 
1244  lpfc_cmd->seg_cnt = nseg;
1245  if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1246  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1247  "9064 BLKGRD: %s: Too many sg segments from "
1248  "dma_map_sg. Config %d, seg_cnt %d\n",
1249  __func__, phba->cfg_sg_seg_cnt,
1250  lpfc_cmd->seg_cnt);
1251  scsi_dma_unmap(scsi_cmnd);
1252  return 1;
1253  }
1254 
1255  /*
1256  * The driver established a maximum scatter-gather segment count
1257  * during probe that limits the number of sg elements in any
1258  * single scsi command. Just run through the seg_cnt and format
1259  * the bde's.
1260  * When using SLI-3 the driver will try to fit all the BDEs into
1261  * the IOCB. If it can't then the BDEs get added to a BPL as it
1262  * does for SLI-2 mode.
1263  */
1264  scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1265  physaddr = sg_dma_address(sgel);
1266  if (phba->sli_rev == 3 &&
1267  !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1268  !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1269  nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1270  data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1271  data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1272  data_bde->addrLow = putPaddrLow(physaddr);
1273  data_bde->addrHigh = putPaddrHigh(physaddr);
1274  data_bde++;
1275  } else {
1276  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1277  bpl->tus.f.bdeSize = sg_dma_len(sgel);
1278  bpl->tus.w = le32_to_cpu(bpl->tus.w);
1279  bpl->addrLow =
1280  le32_to_cpu(putPaddrLow(physaddr));
1281  bpl->addrHigh =
1282  le32_to_cpu(putPaddrHigh(physaddr));
1283  bpl++;
1284  }
1285  }
1286  }
1287 
1288  /*
1289  * Finish initializing those IOCB fields that are dependent on the
1290  * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1291  * explicitly reinitialized and for SLI-3 the extended bde count is
1292  * explicitly reinitialized since all iocb memory resources are reused.
1293  */
1294  if (phba->sli_rev == 3 &&
1295  !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1296  !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1297  if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1298  /*
1299  * The extended IOCB format can only fit 3 BDE or a BPL.
1300  * This I/O has more than 3 BDE so the 1st data bde will
1301  * be a BPL that is filled in here.
1302  */
1303  physaddr = lpfc_cmd->dma_handle;
1304  data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1305  data_bde->tus.f.bdeSize = (num_bde *
1306  sizeof(struct ulp_bde64));
1307  physaddr += (sizeof(struct fcp_cmnd) +
1308  sizeof(struct fcp_rsp) +
1309  (2 * sizeof(struct ulp_bde64)));
1310  data_bde->addrHigh = putPaddrHigh(physaddr);
1311  data_bde->addrLow = putPaddrLow(physaddr);
1312  /* ebde count includes the response bde and data bpl */
1313  iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1314  } else {
1315  /* ebde count includes the response bde and data bdes */
1316  iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1317  }
1318  } else {
1319  iocb_cmd->un.fcpi64.bdl.bdeSize =
1320  ((num_bde + 2) * sizeof(struct ulp_bde64));
1321  iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1322  }
1323  fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1324 
1325  /*
1326  * Due to difference in data length between DIF/non-DIF paths,
1327  * we need to set word 4 of IOCB here
1328  */
1329  iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1330  return 0;
1331 }
1332 
1333 static inline unsigned
1334 lpfc_cmd_blksize(struct scsi_cmnd *sc)
1335 {
1336  return sc->device->sector_size;
1337 }
1338 
1339 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1340 
1341 /* Return if error injection is detected by Initiator */
1342 #define BG_ERR_INIT 0x1
1343 /* Return if error injection is detected by Target */
1344 #define BG_ERR_TGT 0x2
1345 /* Return if swapping CSUM<-->CRC is required for error injection */
1346 #define BG_ERR_SWAP 0x10
1347 /* Return if disabling Guard/Ref/App checking is required for error injection */
1348 #define BG_ERR_CHECK 0x20
1349 
1360 static int
1361 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1362  uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1363 {
1364  struct scatterlist *sgpe; /* s/g prot entry */
1365  struct scatterlist *sgde; /* s/g data entry */
1366  struct lpfc_scsi_buf *lpfc_cmd = NULL;
1367  struct scsi_dif_tuple *src = NULL;
1368  struct lpfc_nodelist *ndlp;
1369  struct lpfc_rport_data *rdata;
1370  uint32_t op = scsi_get_prot_op(sc);
1371  uint32_t blksize;
1372  uint32_t numblks;
1373  sector_t lba;
1374  int rc = 0;
1375  int blockoff = 0;
1376 
1377  if (op == SCSI_PROT_NORMAL)
1378  return 0;
1379 
1380  sgpe = scsi_prot_sglist(sc);
1381  sgde = scsi_sglist(sc);
1382  lba = scsi_get_lba(sc);
1383 
1384  /* First check if we need to match the LBA */
1385  if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1386  blksize = lpfc_cmd_blksize(sc);
1387  numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1388 
1389  /* Make sure we have the right LBA if one is specified */
1390  if ((phba->lpfc_injerr_lba < lba) ||
1391  (phba->lpfc_injerr_lba >= (lba + numblks)))
1392  return 0;
1393  if (sgpe) {
1394  blockoff = phba->lpfc_injerr_lba - lba;
1395  numblks = sg_dma_len(sgpe) /
1396  sizeof(struct scsi_dif_tuple);
1397  if (numblks < blockoff)
1398  blockoff = numblks;
1399  }
1400  }
1401 
1402  /* Next check if we need to match the remote NPortID or WWPN */
1403  rdata = sc->device->hostdata;
1404  if (rdata && rdata->pnode) {
1405  ndlp = rdata->pnode;
1406 
1407  /* Make sure we have the right NPortID if one is specified */
1408  if (phba->lpfc_injerr_nportid &&
1409  (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1410  return 0;
1411 
1412  /*
1413  * Make sure we have the right WWPN if one is specified.
1414  * wwn[0] should be a non-zero NAA in a good WWPN.
1415  */
1416  if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1417  (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1418  sizeof(struct lpfc_name)) != 0))
1419  return 0;
1420  }
1421 
1422  /* Setup a ptr to the protection data if the SCSI host provides it */
1423  if (sgpe) {
1424  src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1425  src += blockoff;
1426  lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1427  }
1428 
1429  /* Should we change the Reference Tag */
1430  if (reftag) {
1431  if (phba->lpfc_injerr_wref_cnt) {
1432  switch (op) {
1433  case SCSI_PROT_WRITE_PASS:
1434  if (src) {
1435  /*
1436  * For WRITE_PASS, force the error
1437  * to be sent on the wire. It should
1438  * be detected by the Target.
1439  * If blockoff != 0 error will be
1440  * inserted in middle of the IO.
1441  */
1442 
1443  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1444  "9076 BLKGRD: Injecting reftag error: "
1445  "write lba x%lx + x%x oldrefTag x%x\n",
1446  (unsigned long)lba, blockoff,
1447  be32_to_cpu(src->ref_tag));
1448 
1449  /*
1450  * Save the old ref_tag so we can
1451  * restore it on completion.
1452  */
1453  if (lpfc_cmd) {
1454  lpfc_cmd->prot_data_type =
1455  LPFC_INJERR_REFTAG;
1456  lpfc_cmd->prot_data_segment =
1457  src;
1458  lpfc_cmd->prot_data =
1459  src->ref_tag;
1460  }
1461  src->ref_tag = cpu_to_be32(0xDEADBEEF);
1462  phba->lpfc_injerr_wref_cnt--;
1463  if (phba->lpfc_injerr_wref_cnt == 0) {
1464  phba->lpfc_injerr_nportid = 0;
1465  phba->lpfc_injerr_lba =
1466  LPFC_INJERR_LBA_OFF;
1467  memset(&phba->lpfc_injerr_wwpn,
1468  0, sizeof(struct lpfc_name));
1469  }
1470  rc = BG_ERR_TGT | BG_ERR_CHECK;
1471 
1472  break;
1473  }
1474  /* Drop thru */
1475  case SCSI_PROT_WRITE_INSERT:
1476  /*
1477  * For WRITE_INSERT, force the error
1478  * to be sent on the wire. It should be
1479  * detected by the Target.
1480  */
1481  /* DEADBEEF will be the reftag on the wire */
1482  *reftag = 0xDEADBEEF;
1483  phba->lpfc_injerr_wref_cnt--;
1484  if (phba->lpfc_injerr_wref_cnt == 0) {
1485  phba->lpfc_injerr_nportid = 0;
1486  phba->lpfc_injerr_lba =
1487  LPFC_INJERR_LBA_OFF;
1488  memset(&phba->lpfc_injerr_wwpn,
1489  0, sizeof(struct lpfc_name));
1490  }
1491  rc = BG_ERR_TGT | BG_ERR_CHECK;
1492 
1493  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1494  "9078 BLKGRD: Injecting reftag error: "
1495  "write lba x%lx\n", (unsigned long)lba);
1496  break;
1497  case SCSI_PROT_WRITE_STRIP:
1498  /*
1499  * For WRITE_STRIP and WRITE_PASS,
1500  * force the error on data
1501  * being copied from SLI-Host to SLI-Port.
1502  */
1503  *reftag = 0xDEADBEEF;
1504  phba->lpfc_injerr_wref_cnt--;
1505  if (phba->lpfc_injerr_wref_cnt == 0) {
1506  phba->lpfc_injerr_nportid = 0;
1507  phba->lpfc_injerr_lba =
1508  LPFC_INJERR_LBA_OFF;
1509  memset(&phba->lpfc_injerr_wwpn,
1510  0, sizeof(struct lpfc_name));
1511  }
1512  rc = BG_ERR_INIT;
1513 
1514  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1515  "9077 BLKGRD: Injecting reftag error: "
1516  "write lba x%lx\n", (unsigned long)lba);
1517  break;
1518  }
1519  }
1520  if (phba->lpfc_injerr_rref_cnt) {
1521  switch (op) {
1522  case SCSI_PROT_READ_INSERT:
1523  case SCSI_PROT_READ_STRIP:
1524  case SCSI_PROT_READ_PASS:
1525  /*
1526  * For READ_STRIP and READ_PASS, force the
1527  * error on data being read off the wire. It
1528  * should force an IO error to the driver.
1529  */
1530  *reftag = 0xDEADBEEF;
1531  phba->lpfc_injerr_rref_cnt--;
1532  if (phba->lpfc_injerr_rref_cnt == 0) {
1533  phba->lpfc_injerr_nportid = 0;
1534  phba->lpfc_injerr_lba =
1535  LPFC_INJERR_LBA_OFF;
1536  memset(&phba->lpfc_injerr_wwpn,
1537  0, sizeof(struct lpfc_name));
1538  }
1539  rc = BG_ERR_INIT;
1540 
1541  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1542  "9079 BLKGRD: Injecting reftag error: "
1543  "read lba x%lx\n", (unsigned long)lba);
1544  break;
1545  }
1546  }
1547  }
1548 
1549  /* Should we change the Application Tag */
1550  if (apptag) {
1551  if (phba->lpfc_injerr_wapp_cnt) {
1552  switch (op) {
1553  case SCSI_PROT_WRITE_PASS:
1554  if (src) {
1555  /*
1556  * For WRITE_PASS, force the error
1557  * to be sent on the wire. It should
1558  * be detected by the Target.
1559  * If blockoff != 0 error will be
1560  * inserted in middle of the IO.
1561  */
1562 
1563  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1564  "9080 BLKGRD: Injecting apptag error: "
1565  "write lba x%lx + x%x oldappTag x%x\n",
1566  (unsigned long)lba, blockoff,
1567  be16_to_cpu(src->app_tag));
1568 
1569  /*
1570  * Save the old app_tag so we can
1571  * restore it on completion.
1572  */
1573  if (lpfc_cmd) {
1574  lpfc_cmd->prot_data_type =
1575  LPFC_INJERR_APPTAG;
1576  lpfc_cmd->prot_data_segment =
1577  src;
1578  lpfc_cmd->prot_data =
1579  src->app_tag;
1580  }
1581  src->app_tag = cpu_to_be16(0xDEAD);
1582  phba->lpfc_injerr_wapp_cnt--;
1583  if (phba->lpfc_injerr_wapp_cnt == 0) {
1584  phba->lpfc_injerr_nportid = 0;
1585  phba->lpfc_injerr_lba =
1586  LPFC_INJERR_LBA_OFF;
1587  memset(&phba->lpfc_injerr_wwpn,
1588  0, sizeof(struct lpfc_name));
1589  }
1590  rc = BG_ERR_TGT | BG_ERR_CHECK;
1591  break;
1592  }
1593  /* Drop thru */
1594  case SCSI_PROT_WRITE_INSERT:
1595  /*
1596  * For WRITE_INSERT, force the
1597  * error to be sent on the wire. It should be
1598  * detected by the Target.
1599  */
1600  /* DEAD will be the apptag on the wire */
1601  *apptag = 0xDEAD;
1602  phba->lpfc_injerr_wapp_cnt--;
1603  if (phba->lpfc_injerr_wapp_cnt == 0) {
1604  phba->lpfc_injerr_nportid = 0;
1605  phba->lpfc_injerr_lba =
1606  LPFC_INJERR_LBA_OFF;
1607  memset(&phba->lpfc_injerr_wwpn,
1608  0, sizeof(struct lpfc_name));
1609  }
1610  rc = BG_ERR_TGT | BG_ERR_CHECK;
1611 
1612  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1613  "0813 BLKGRD: Injecting apptag error: "
1614  "write lba x%lx\n", (unsigned long)lba);
1615  break;
1616  case SCSI_PROT_WRITE_STRIP:
1617  /*
1618  * For WRITE_STRIP and WRITE_PASS,
1619  * force the error on data
1620  * being copied from SLI-Host to SLI-Port.
1621  */
1622  *apptag = 0xDEAD;
1623  phba->lpfc_injerr_wapp_cnt--;
1624  if (phba->lpfc_injerr_wapp_cnt == 0) {
1625  phba->lpfc_injerr_nportid = 0;
1626  phba->lpfc_injerr_lba =
1627  LPFC_INJERR_LBA_OFF;
1628  memset(&phba->lpfc_injerr_wwpn,
1629  0, sizeof(struct lpfc_name));
1630  }
1631  rc = BG_ERR_INIT;
1632 
1633  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1634  "0812 BLKGRD: Injecting apptag error: "
1635  "write lba x%lx\n", (unsigned long)lba);
1636  break;
1637  }
1638  }
1639  if (phba->lpfc_injerr_rapp_cnt) {
1640  switch (op) {
1641  case SCSI_PROT_READ_INSERT:
1642  case SCSI_PROT_READ_STRIP:
1643  case SCSI_PROT_READ_PASS:
1644  /*
1645  * For READ_STRIP and READ_PASS, force the
1646  * error on data being read off the wire. It
1647  * should force an IO error to the driver.
1648  */
1649  *apptag = 0xDEAD;
1650  phba->lpfc_injerr_rapp_cnt--;
1651  if (phba->lpfc_injerr_rapp_cnt == 0) {
1652  phba->lpfc_injerr_nportid = 0;
1653  phba->lpfc_injerr_lba =
1654  LPFC_INJERR_LBA_OFF;
1655  memset(&phba->lpfc_injerr_wwpn,
1656  0, sizeof(struct lpfc_name));
1657  }
1658  rc = BG_ERR_INIT;
1659 
1660  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1661  "0814 BLKGRD: Injecting apptag error: "
1662  "read lba x%lx\n", (unsigned long)lba);
1663  break;
1664  }
1665  }
1666  }
1667 
1668 
1669  /* Should we change the Guard Tag */
1670  if (new_guard) {
1671  if (phba->lpfc_injerr_wgrd_cnt) {
1672  switch (op) {
1673  case SCSI_PROT_WRITE_PASS:
1674  rc = BG_ERR_CHECK;
1675  /* Drop thru */
1676 
1677  case SCSI_PROT_WRITE_INSERT:
1678  /*
1679  * For WRITE_INSERT, force the
1680  * error to be sent on the wire. It should be
1681  * detected by the Target.
1682  */
1683  phba->lpfc_injerr_wgrd_cnt--;
1684  if (phba->lpfc_injerr_wgrd_cnt == 0) {
1685  phba->lpfc_injerr_nportid = 0;
1686  phba->lpfc_injerr_lba =
1687  LPFC_INJERR_LBA_OFF;
1688  memset(&phba->lpfc_injerr_wwpn,
1689  0, sizeof(struct lpfc_name));
1690  }
1691 
1692  rc |= BG_ERR_TGT | BG_ERR_SWAP;
1693  /* Signals the caller to swap CRC->CSUM */
1694 
1695  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1696  "0817 BLKGRD: Injecting guard error: "
1697  "write lba x%lx\n", (unsigned long)lba);
1698  break;
1699  case SCSI_PROT_WRITE_STRIP:
1700  /*
1701  * For WRITE_STRIP and WRITE_PASS,
1702  * force the error on data
1703  * being copied from SLI-Host to SLI-Port.
1704  */
1705  phba->lpfc_injerr_wgrd_cnt--;
1706  if (phba->lpfc_injerr_wgrd_cnt == 0) {
1707  phba->lpfc_injerr_nportid = 0;
1708  phba->lpfc_injerr_lba =
1709  LPFC_INJERR_LBA_OFF;
1710  memset(&phba->lpfc_injerr_wwpn,
1711  0, sizeof(struct lpfc_name));
1712  }
1713 
1714  rc = BG_ERR_INIT | BG_ERR_SWAP;
1715  /* Signals the caller to swap CRC->CSUM */
1716 
1717  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1718  "0816 BLKGRD: Injecting guard error: "
1719  "write lba x%lx\n", (unsigned long)lba);
1720  break;
1721  }
1722  }
1723  if (phba->lpfc_injerr_rgrd_cnt) {
1724  switch (op) {
1725  case SCSI_PROT_READ_INSERT:
1726  case SCSI_PROT_READ_STRIP:
1727  case SCSI_PROT_READ_PASS:
1728  /*
1729  * For READ_STRIP and READ_PASS, force the
1730  * error on data being read off the wire. It
1731  * should force an IO error to the driver.
1732  */
1733  phba->lpfc_injerr_rgrd_cnt--;
1734  if (phba->lpfc_injerr_rgrd_cnt == 0) {
1735  phba->lpfc_injerr_nportid = 0;
1736  phba->lpfc_injerr_lba =
1737  LPFC_INJERR_LBA_OFF;
1738  memset(&phba->lpfc_injerr_wwpn,
1739  0, sizeof(struct lpfc_name));
1740  }
1741 
1742  rc = BG_ERR_INIT | BG_ERR_SWAP;
1743  /* Signals the caller to swap CRC->CSUM */
1744 
1745  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1746  "0818 BLKGRD: Injecting guard error: "
1747  "read lba x%lx\n", (unsigned long)lba);
1748  }
1749  }
1750  }
1751 
1752  return rc;
1753 }
1754 #endif
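/*
 * Summary of the return flags from lpfc_bg_err_inject() above: the
 * write-pass/write-insert cases report BG_ERR_TGT | BG_ERR_CHECK (the
 * corrupted tag goes out on the wire with checking relaxed), the
 * write-strip and read cases report BG_ERR_INIT (error injected on the
 * host side), and the guard-tag cases additionally set BG_ERR_SWAP so
 * the caller swaps CRC and checksum opcodes via lpfc_bg_err_opcodes().
 */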
1755 
1767 static int
1768 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1769  uint8_t *txop, uint8_t *rxop)
1770 {
1771  uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1772  uint8_t ret = 0;
1773 
1774  if (guard_type == SHOST_DIX_GUARD_IP) {
1775  switch (scsi_get_prot_op(sc)) {
1776  case SCSI_PROT_READ_INSERT:
1777  case SCSI_PROT_WRITE_STRIP:
1778  *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1779  *txop = BG_OP_IN_CSUM_OUT_NODIF;
1780  break;
1781 
1782  case SCSI_PROT_READ_STRIP:
1783  case SCSI_PROT_WRITE_INSERT:
1784  *rxop = BG_OP_IN_CRC_OUT_NODIF;
1785  *txop = BG_OP_IN_NODIF_OUT_CRC;
1786  break;
1787 
1788  case SCSI_PROT_READ_PASS:
1789  case SCSI_PROT_WRITE_PASS:
1790  *rxop = BG_OP_IN_CRC_OUT_CSUM;
1791  *txop = BG_OP_IN_CSUM_OUT_CRC;
1792  break;
1793 
1794  case SCSI_PROT_NORMAL:
1795  default:
1796  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1797  "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1798  scsi_get_prot_op(sc));
1799  ret = 1;
1800  break;
1801 
1802  }
1803  } else {
1804  switch (scsi_get_prot_op(sc)) {
1805  case SCSI_PROT_READ_STRIP:
1806  case SCSI_PROT_WRITE_INSERT:
1807  *rxop = BG_OP_IN_CRC_OUT_NODIF;
1808  *txop = BG_OP_IN_NODIF_OUT_CRC;
1809  break;
1810 
1811  case SCSI_PROT_READ_PASS:
1812  case SCSI_PROT_WRITE_PASS:
1813  *rxop = BG_OP_IN_CRC_OUT_CRC;
1814  *txop = BG_OP_IN_CRC_OUT_CRC;
1815  break;
1816 
1817  case SCSI_PROT_READ_INSERT:
1818  case SCSI_PROT_WRITE_STRIP:
1819  *rxop = BG_OP_IN_NODIF_OUT_CRC;
1820  *txop = BG_OP_IN_CRC_OUT_NODIF;
1821  break;
1822 
1823  case SCSI_PROT_NORMAL:
1824  default:
1825  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1826  "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1827  scsi_get_prot_op(sc));
1828  ret = 1;
1829  break;
1830  }
1831  }
1832 
1833  return ret;
1834 }
1835 
1836 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1837 
1848 static int
1849 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1850  uint8_t *txop, uint8_t *rxop)
1851 {
1852  uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1853  uint8_t ret = 0;
1854 
1855  if (guard_type == SHOST_DIX_GUARD_IP) {
1856  switch (scsi_get_prot_op(sc)) {
1857  case SCSI_PROT_READ_INSERT:
1858  case SCSI_PROT_WRITE_STRIP:
1859  *rxop = BG_OP_IN_NODIF_OUT_CRC;
1860  *txop = BG_OP_IN_CRC_OUT_NODIF;
1861  break;
1862 
1863  case SCSI_PROT_READ_STRIP:
1864  case SCSI_PROT_WRITE_INSERT:
1865  *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1866  *txop = BG_OP_IN_NODIF_OUT_CSUM;
1867  break;
1868 
1869  case SCSI_PROT_READ_PASS:
1870  case SCSI_PROT_WRITE_PASS:
1871  *rxop = BG_OP_IN_CSUM_OUT_CRC;
1872  *txop = BG_OP_IN_CRC_OUT_CSUM;
1873  break;
1874 
1875  case SCSI_PROT_NORMAL:
1876  default:
1877  break;
1878 
1879  }
1880  } else {
1881  switch (scsi_get_prot_op(sc)) {
1882  case SCSI_PROT_READ_STRIP:
1883  case SCSI_PROT_WRITE_INSERT:
1884  *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1885  *txop = BG_OP_IN_NODIF_OUT_CSUM;
1886  break;
1887 
1888  case SCSI_PROT_READ_PASS:
1889  case SCSI_PROT_WRITE_PASS:
1890  *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1891  *txop = BG_OP_IN_CSUM_OUT_CSUM;
1892  break;
1893 
1894  case SCSI_PROT_READ_INSERT:
1895  case SCSI_PROT_WRITE_STRIP:
1896  *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1897  *txop = BG_OP_IN_CSUM_OUT_NODIF;
1898  break;
1899 
1900  case SCSI_PROT_NORMAL:
1901  default:
1902  break;
1903  }
1904  }
1905 
1906  return ret;
1907 }
1908 #endif
1909 
1941 static int
1942 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1943  struct ulp_bde64 *bpl, int datasegcnt)
1944 {
1945  struct scatterlist *sgde = NULL; /* s/g data entry */
1946  struct lpfc_pde5 *pde5 = NULL;
1947  struct lpfc_pde6 *pde6 = NULL;
1948  dma_addr_t physaddr;
1949  int i = 0, num_bde = 0, status;
1950  int datadir = sc->sc_data_direction;
1951 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1952  uint32_t rc;
1953 #endif
1954  uint32_t checking = 1;
1955  uint32_t reftag;
1956  unsigned blksize;
1957  uint8_t txop, rxop;
1958 
1959  status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1960  if (status)
1961  goto out;
1962 
1963  /* extract some info from the scsi command for pde*/
1964  blksize = lpfc_cmd_blksize(sc);
1965  reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1966 
1967 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1968  rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1969  if (rc) {
1970  if (rc & BG_ERR_SWAP)
1971  lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1972  if (rc & BG_ERR_CHECK)
1973  checking = 0;
1974  }
1975 #endif
1976 
1977  /* setup PDE5 with what we have */
1978  pde5 = (struct lpfc_pde5 *) bpl;
1979  memset(pde5, 0, sizeof(struct lpfc_pde5));
1980  bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1981 
1982  /* Endianness conversion if necessary for PDE5 */
1983  pde5->word0 = cpu_to_le32(pde5->word0);
1984  pde5->reftag = cpu_to_le32(reftag);
1985 
1986  /* advance bpl and increment bde count */
1987  num_bde++;
1988  bpl++;
1989  pde6 = (struct lpfc_pde6 *) bpl;
1990 
1991  /* setup PDE6 with the rest of the info */
1992  memset(pde6, 0, sizeof(struct lpfc_pde6));
1993  bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1994  bf_set(pde6_optx, pde6, txop);
1995  bf_set(pde6_oprx, pde6, rxop);
1996  if (datadir == DMA_FROM_DEVICE) {
1997  bf_set(pde6_ce, pde6, checking);
1998  bf_set(pde6_re, pde6, checking);
1999  }
2000  bf_set(pde6_ai, pde6, 1);
2001  bf_set(pde6_ae, pde6, 0);
2002  bf_set(pde6_apptagval, pde6, 0);
2003 
2004  /* Endianness conversion if necessary for PDE6 */
2005  pde6->word0 = cpu_to_le32(pde6->word0);
2006  pde6->word1 = cpu_to_le32(pde6->word1);
2007  pde6->word2 = cpu_to_le32(pde6->word2);
2008 
2009  /* advance bpl and increment bde count */
2010  num_bde++;
2011  bpl++;
2012 
2013  /* assumption: caller has already run dma_map_sg on command data */
2014  scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2015  physaddr = sg_dma_address(sgde);
2016  bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2017  bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2018  bpl->tus.f.bdeSize = sg_dma_len(sgde);
2019  if (datadir == DMA_TO_DEVICE)
2020  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2021  else
2022  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2023  bpl->tus.w = le32_to_cpu(bpl->tus.w);
2024  bpl++;
2025  num_bde++;
2026  }
2027 
2028 out:
2029  return num_bde;
2030 }
2031 
2071 static int
2072 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2073  struct ulp_bde64 *bpl, int datacnt, int protcnt)
2074 {
2075  struct scatterlist *sgde = NULL; /* s/g data entry */
2076  struct scatterlist *sgpe = NULL; /* s/g prot entry */
2077  struct lpfc_pde5 *pde5 = NULL;
2078  struct lpfc_pde6 *pde6 = NULL;
2079  struct lpfc_pde7 *pde7 = NULL;
2080  dma_addr_t dataphysaddr, protphysaddr;
2081  unsigned short curr_data = 0, curr_prot = 0;
2082  unsigned int split_offset;
2083  unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2084  unsigned int protgrp_blks, protgrp_bytes;
2085  unsigned int remainder, subtotal;
2086  int status;
2087  int datadir = sc->sc_data_direction;
2088  unsigned char pgdone = 0, alldone = 0;
2089  unsigned blksize;
2090 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2091  uint32_t rc;
2092 #endif
2093  uint32_t checking = 1;
2094  uint32_t reftag;
2095  uint8_t txop, rxop;
2096  int num_bde = 0;
2097 
2098  sgpe = scsi_prot_sglist(sc);
2099  sgde = scsi_sglist(sc);
2100 
2101  if (!sgpe || !sgde) {
2102  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2103  "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2104  sgpe, sgde);
2105  return 0;
2106  }
2107 
2108  status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2109  if (status)
2110  goto out;
2111 
2112  /* extract some info from the scsi command */
2113  blksize = lpfc_cmd_blksize(sc);
2114  reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2115 
2116 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2117  rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2118  if (rc) {
2119  if (rc & BG_ERR_SWAP)
2120  lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2121  if (rc & BG_ERR_CHECK)
2122  checking = 0;
2123  }
2124 #endif
2125 
2126  split_offset = 0;
2127  do {
2128  /* setup PDE5 with what we have */
2129  pde5 = (struct lpfc_pde5 *) bpl;
2130  memset(pde5, 0, sizeof(struct lpfc_pde5));
2131  bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2132 
2133  /* Endianness conversion if necessary for PDE5 */
2134  pde5->word0 = cpu_to_le32(pde5->word0);
2135  pde5->reftag = cpu_to_le32(reftag);
2136 
2137  /* advance bpl and increment bde count */
2138  num_bde++;
2139  bpl++;
2140  pde6 = (struct lpfc_pde6 *) bpl;
2141 
2142  /* setup PDE6 with the rest of the info */
2143  memset(pde6, 0, sizeof(struct lpfc_pde6));
2144  bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2145  bf_set(pde6_optx, pde6, txop);
2146  bf_set(pde6_oprx, pde6, rxop);
2147  bf_set(pde6_ce, pde6, checking);
2148  bf_set(pde6_re, pde6, checking);
2149  bf_set(pde6_ai, pde6, 1);
2150  bf_set(pde6_ae, pde6, 0);
2151  bf_set(pde6_apptagval, pde6, 0);
2152 
2153  /* Endianness conversion if necessary for PDE6 */
2154  pde6->word0 = cpu_to_le32(pde6->word0);
2155  pde6->word1 = cpu_to_le32(pde6->word1);
2156  pde6->word2 = cpu_to_le32(pde6->word2);
2157 
2158  /* advance bpl and increment bde count */
2159  num_bde++;
2160  bpl++;
2161 
2162  /* setup the first BDE that points to protection buffer */
2163  protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2164  protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2165 
2166  /* must be integer multiple of the DIF block length */
2167  BUG_ON(protgroup_len % 8);
2168 
2169  pde7 = (struct lpfc_pde7 *) bpl;
2170  memset(pde7, 0, sizeof(struct lpfc_pde7));
2171  bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2172 
2173  pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2174  pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2175 
2176  protgrp_blks = protgroup_len / 8;
2177  protgrp_bytes = protgrp_blks * blksize;
2178 
2179  /* check if this pde is crossing the 4K boundary; if so split */
2180  if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2181  protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2182  protgroup_offset += protgroup_remainder;
2183  protgrp_blks = protgroup_remainder / 8;
2184  protgrp_bytes = protgrp_blks * blksize;
2185  } else {
2186  protgroup_offset = 0;
2187  curr_prot++;
2188  }
2189 
2190  num_bde++;
2191 
2192  /* setup BDE's for data blocks associated with DIF data */
2193  pgdone = 0;
2194  subtotal = 0; /* total bytes processed for current prot grp */
2195  while (!pgdone) {
2196  if (!sgde) {
2197  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2198  "9065 BLKGRD:%s Invalid data segment\n",
2199  __func__);
2200  return 0;
2201  }
2202  bpl++;
2203  dataphysaddr = sg_dma_address(sgde) + split_offset;
2204  bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2205  bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2206 
2207  remainder = sg_dma_len(sgde) - split_offset;
2208 
2209  if ((subtotal + remainder) <= protgrp_bytes) {
2210  /* we can use this whole buffer */
2211  bpl->tus.f.bdeSize = remainder;
2212  split_offset = 0;
2213 
2214  if ((subtotal + remainder) == protgrp_bytes)
2215  pgdone = 1;
2216  } else {
2217  /* must split this buffer with next prot grp */
2218  bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2219  split_offset += bpl->tus.f.bdeSize;
2220  }
2221 
2222  subtotal += bpl->tus.f.bdeSize;
2223 
2224  if (datadir == DMA_TO_DEVICE)
2225  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2226  else
2227  bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2228  bpl->tus.w = le32_to_cpu(bpl->tus.w);
2229 
2230  num_bde++;
2231  curr_data++;
2232 
2233  if (split_offset)
2234  break;
2235 
2236  /* Move to the next s/g segment if possible */
2237  sgde = sg_next(sgde);
2238 
2239  }
2240 
2241  if (protgroup_offset) {
2242  /* update the reference tag */
2243  reftag += protgrp_blks;
2244  bpl++;
2245  continue;
2246  }
2247 
2248  /* are we done ? */
2249  if (curr_prot == protcnt) {
2250  alldone = 1;
2251  } else if (curr_prot < protcnt) {
2252  /* advance to next prot buffer */
2253  sgpe = sg_next(sgpe);
2254  bpl++;
2255 
2256  /* update the reference tag */
2257  reftag += protgrp_blks;
2258  } else {
2259  /* if we're here, we have a bug */
2260  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2261  "9054 BLKGRD: bug in %s\n", __func__);
2262  }
2263 
2264  } while (!alldone);
2265 out:
2266 
2267  return num_bde;
2268 }
2269 
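 /*
  * lpfc_bg_setup_sgl - Build the SLI-4 SGL for a BlockGuard I/O that has no
  * separate protection buffers: one DISEED SGE carrying the reference tag and
  * the DIF tx/rx opcodes, followed by one data SGE per mapped data segment.
  * Returns the number of SGEs added, or 0 if the opcodes cannot be resolved.
  */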
2299 static int
2300 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2301  struct sli4_sge *sgl, int datasegcnt)
2302 {
2303  struct scatterlist *sgde = NULL; /* s/g data entry */
2304  struct sli4_sge_diseed *diseed = NULL;
2305  dma_addr_t physaddr;
2306  int i = 0, num_sge = 0, status;
2307  int datadir = sc->sc_data_direction;
2308  uint32_t reftag;
2309  unsigned blksize;
2310  uint8_t txop, rxop;
2311 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2312  uint32_t rc;
2313 #endif
2314  uint32_t checking = 1;
2315  uint32_t dma_len;
2316  uint32_t dma_offset = 0;
2317 
2318  status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2319  if (status)
2320  goto out;
2321 
2322  /* extract some info from the scsi command for pde*/
2323  blksize = lpfc_cmd_blksize(sc);
2324  reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2325 
2326 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2327  rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2328  if (rc) {
2329  if (rc & BG_ERR_SWAP)
2330  lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2331  if (rc & BG_ERR_CHECK)
2332  checking = 0;
2333  }
2334 #endif
2335 
2336  /* setup DISEED with what we have */
2337  diseed = (struct sli4_sge_diseed *) sgl;
2338  memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2339  bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2340 
2341  /* Endianness conversion if necessary */
2342  diseed->ref_tag = cpu_to_le32(reftag);
2343  diseed->ref_tag_tran = diseed->ref_tag;
2344 
2345  /* setup DISEED with the rest of the info */
2346  bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2347  bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2348  if (datadir == DMA_FROM_DEVICE) {
2349  bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2350  bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2351  }
2352  bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2353  bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2354 
2355  /* Endianness conversion if necessary for DISEED */
2356  diseed->word2 = cpu_to_le32(diseed->word2);
2357  diseed->word3 = cpu_to_le32(diseed->word3);
2358 
2359  /* advance bpl and increment sge count */
2360  num_sge++;
2361  sgl++;
2362 
2363  /* assumption: caller has already run dma_map_sg on command data */
2364  scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2365  physaddr = sg_dma_address(sgde);
2366  dma_len = sg_dma_len(sgde);
2367  sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2368  sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2369  if ((i + 1) == datasegcnt)
2370  bf_set(lpfc_sli4_sge_last, sgl, 1);
2371  else
2372  bf_set(lpfc_sli4_sge_last, sgl, 0);
2373  bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2374  bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2375 
2376  sgl->sge_len = cpu_to_le32(dma_len);
2377  dma_offset += dma_len;
2378 
2379  sgl++;
2380  num_sge++;
2381  }
2382 
2383 out:
2384  return num_sge;
2385 }
2386 
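 /*
  * lpfc_bg_setup_sgl_prot - Build the SLI-4 SGL for a BlockGuard I/O that
  * carries separate protection buffers. For each protection group it emits a
  * DISEED SGE, a DIF SGE pointing at the protection data (split on 4K
  * boundaries), and the data SGEs that belong to that group, bumping the
  * reference tag as each group completes. Returns the SGE count, or 0 on error.
  */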
2424 static int
2425 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2426  struct sli4_sge *sgl, int datacnt, int protcnt)
2427 {
2428  struct scatterlist *sgde = NULL; /* s/g data entry */
2429  struct scatterlist *sgpe = NULL; /* s/g prot entry */
2430  struct sli4_sge_diseed *diseed = NULL;
2431  dma_addr_t dataphysaddr, protphysaddr;
2432  unsigned short curr_data = 0, curr_prot = 0;
2433  unsigned int split_offset;
2434  unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2435  unsigned int protgrp_blks, protgrp_bytes;
2436  unsigned int remainder, subtotal;
2437  int status;
2438  unsigned char pgdone = 0, alldone = 0;
2439  unsigned blksize;
2440  uint32_t reftag;
2441  uint8_t txop, rxop;
2442  uint32_t dma_len;
2443 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2444  uint32_t rc;
2445 #endif
2446  uint32_t checking = 1;
2447  uint32_t dma_offset = 0;
2448  int num_sge = 0;
2449 
2450  sgpe = scsi_prot_sglist(sc);
2451  sgde = scsi_sglist(sc);
2452 
2453  if (!sgpe || !sgde) {
2454  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2455  "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2456  sgpe, sgde);
2457  return 0;
2458  }
2459 
2460  status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2461  if (status)
2462  goto out;
2463 
2464  /* extract some info from the scsi command */
2465  blksize = lpfc_cmd_blksize(sc);
2466  reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2467 
2468 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2469  rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2470  if (rc) {
2471  if (rc & BG_ERR_SWAP)
2472  lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2473  if (rc & BG_ERR_CHECK)
2474  checking = 0;
2475  }
2476 #endif
2477 
2478  split_offset = 0;
2479  do {
2480  /* setup DISEED with what we have */
2481  diseed = (struct sli4_sge_diseed *) sgl;
2482  memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2483  bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2484 
2485  /* Endianness conversion if necessary */
2486  diseed->ref_tag = cpu_to_le32(reftag);
2487  diseed->ref_tag_tran = diseed->ref_tag;
2488 
2489  /* setup DISEED with the rest of the info */
2490  bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2491  bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2492  bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2493  bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2494  bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2495  bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2496 
2497  /* Endianness conversion if necessary for DISEED */
2498  diseed->word2 = cpu_to_le32(diseed->word2);
2499  diseed->word3 = cpu_to_le32(diseed->word3);
2500 
2501  /* advance sgl and increment bde count */
2502  num_sge++;
2503  sgl++;
2504 
2505  /* setup the first BDE that points to protection buffer */
2506  protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2507  protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2508 
2509  /* must be integer multiple of the DIF block length */
2510  BUG_ON(protgroup_len % 8);
2511 
2512  /* Now setup DIF SGE */
2513  sgl->word2 = 0;
2514  bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2515  sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2516  sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2517  sgl->word2 = cpu_to_le32(sgl->word2);
2518 
2519  protgrp_blks = protgroup_len / 8;
2520  protgrp_bytes = protgrp_blks * blksize;
2521 
2522  /* check if DIF SGE is crossing the 4K boundary; if so split */
2523  if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2524  protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2525  protgroup_offset += protgroup_remainder;
2526  protgrp_blks = protgroup_remainder / 8;
2527  protgrp_bytes = protgrp_blks * blksize;
2528  } else {
2529  protgroup_offset = 0;
2530  curr_prot++;
2531  }
2532 
2533  num_sge++;
2534 
2535  /* setup SGE's for data blocks associated with DIF data */
2536  pgdone = 0;
2537  subtotal = 0; /* total bytes processed for current prot grp */
2538  while (!pgdone) {
2539  if (!sgde) {
2540  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2541  "9086 BLKGRD:%s Invalid data segment\n",
2542  __func__);
2543  return 0;
2544  }
2545  sgl++;
2546  dataphysaddr = sg_dma_address(sgde) + split_offset;
2547 
2548  remainder = sg_dma_len(sgde) - split_offset;
2549 
2550  if ((subtotal + remainder) <= protgrp_bytes) {
2551  /* we can use this whole buffer */
2552  dma_len = remainder;
2553  split_offset = 0;
2554 
2555  if ((subtotal + remainder) == protgrp_bytes)
2556  pgdone = 1;
2557  } else {
2558  /* must split this buffer with next prot grp */
2559  dma_len = protgrp_bytes - subtotal;
2560  split_offset += dma_len;
2561  }
2562 
2563  subtotal += dma_len;
2564 
2565  sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2566  sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2567  bf_set(lpfc_sli4_sge_last, sgl, 0);
2568  bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2569  bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2570 
2571  sgl->sge_len = cpu_to_le32(dma_len);
2572  dma_offset += dma_len;
2573 
2574  num_sge++;
2575  curr_data++;
2576 
2577  if (split_offset)
2578  break;
2579 
2580  /* Move to the next s/g segment if possible */
2581  sgde = sg_next(sgde);
2582  }
2583 
2584  if (protgroup_offset) {
2585  /* update the reference tag */
2586  reftag += protgrp_blks;
2587  sgl++;
2588  continue;
2589  }
2590 
2591  /* are we done ? */
2592  if (curr_prot == protcnt) {
2593  bf_set(lpfc_sli4_sge_last, sgl, 1);
2594  alldone = 1;
2595  } else if (curr_prot < protcnt) {
2596  /* advance to next prot buffer */
2597  sgpe = sg_next(sgpe);
2598  sgl++;
2599 
2600  /* update the reference tag */
2601  reftag += protgrp_blks;
2602  } else {
2603  /* if we're here, we have a bug */
2604  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2605  "9085 BLKGRD: bug in %s\n", __func__);
2606  }
2607 
2608  } while (!alldone);
2609 
2610 out:
2611 
2612  return num_sge;
2613 }
2614 
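 /*
  * lpfc_prot_group_type - Map the command's protection operation to the
  * buffer-list layout it needs: LPFC_PG_TYPE_NO_DIF when no protection
  * buffers accompany the data, LPFC_PG_TYPE_DIF_BUF when they do, and
  * LPFC_PG_TYPE_INVALID for unsupported operations.
  */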
2626 static int
2627 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2628 {
2629  int ret = LPFC_PG_TYPE_INVALID;
2630  unsigned char op = scsi_get_prot_op(sc);
2631 
2632  switch (op) {
2633  case SCSI_PROT_READ_STRIP:
2634  case SCSI_PROT_WRITE_INSERT:
2635  ret = LPFC_PG_TYPE_NO_DIF;
2636  break;
2637  case SCSI_PROT_READ_INSERT:
2638  case SCSI_PROT_WRITE_STRIP:
2639  case SCSI_PROT_READ_PASS:
2640  case SCSI_PROT_WRITE_PASS:
2641  ret = LPFC_PG_TYPE_DIF_BUF;
2642  break;
2643  default:
2644  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2645  "9021 Unsupported protection op:%d\n", op);
2646  break;
2647  }
2648 
2649  return ret;
2650 }
2651 
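 /*
  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA-map a BlockGuard command for SLI-3,
  * build the BPL with lpfc_bg_setup_bpl or lpfc_bg_setup_bpl_prot, and, for
  * DIF Type 1, grow the FCP data length to cover the 8-byte DIF trailer on
  * each block. Returns 0 on success, 1 on any mapping or setup failure.
  */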
2661 static int
2662 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2663  struct lpfc_scsi_buf *lpfc_cmd)
2664 {
2665  struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2666  struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2667  struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2668  IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2669  uint32_t num_bde = 0;
2670  int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2671  int prot_group_type = 0;
2672  int diflen, fcpdl;
2673  unsigned blksize;
2674 
2675  /*
2676  * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2677  * fcp_rsp regions to the first data bde entry
2678  */
2679  bpl += 2;
2680  if (scsi_sg_count(scsi_cmnd)) {
2681  /*
2682  * The driver stores the segment count returned from pci_map_sg
2683  * because this is a count of dma-mappings used to map the use_sg
2684  * pages. They are not guaranteed to be the same for those
2685  * architectures that implement an IOMMU.
2686  */
2687  datasegcnt = dma_map_sg(&phba->pcidev->dev,
2688  scsi_sglist(scsi_cmnd),
2689  scsi_sg_count(scsi_cmnd), datadir);
2690  if (unlikely(!datasegcnt))
2691  return 1;
2692 
2693  lpfc_cmd->seg_cnt = datasegcnt;
2694  if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2695  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2696  "9067 BLKGRD: %s: Too many sg segments"
2697  " from dma_map_sg. Config %d, seg_cnt"
2698  " %d\n",
2699  __func__, phba->cfg_sg_seg_cnt,
2700  lpfc_cmd->seg_cnt);
2701  scsi_dma_unmap(scsi_cmnd);
2702  return 1;
2703  }
2704 
2705  prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2706 
2707  switch (prot_group_type) {
2708  case LPFC_PG_TYPE_NO_DIF:
2709  num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2710  datasegcnt);
2711  /* we should have 2 or more entries in buffer list */
2712  if (num_bde < 2)
2713  goto err;
2714  break;
2715  case LPFC_PG_TYPE_DIF_BUF:{
2716  /*
2717  * This type indicates that protection buffers are
2718  * passed to the driver, so that needs to be prepared
2719  * for DMA
2720  */
2721  protsegcnt = dma_map_sg(&phba->pcidev->dev,
2722  scsi_prot_sglist(scsi_cmnd),
2723  scsi_prot_sg_count(scsi_cmnd), datadir);
2724  if (unlikely(!protsegcnt)) {
2725  scsi_dma_unmap(scsi_cmnd);
2726  return 1;
2727  }
2728 
2729  lpfc_cmd->prot_seg_cnt = protsegcnt;
2730  if (lpfc_cmd->prot_seg_cnt
2731  > phba->cfg_prot_sg_seg_cnt) {
2732  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2733  "9068 BLKGRD: %s: Too many prot sg "
2734  "segments from dma_map_sg. Config %d,"
2735  "prot_seg_cnt %d\n", __func__,
2736  phba->cfg_prot_sg_seg_cnt,
2737  lpfc_cmd->prot_seg_cnt);
2738  dma_unmap_sg(&phba->pcidev->dev,
2739  scsi_prot_sglist(scsi_cmnd),
2740  scsi_prot_sg_count(scsi_cmnd),
2741  datadir);
2742  scsi_dma_unmap(scsi_cmnd);
2743  return 1;
2744  }
2745 
2746  num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2747  datasegcnt, protsegcnt);
2748  /* we should have 3 or more entries in buffer list */
2749  if (num_bde < 3)
2750  goto err;
2751  break;
2752  }
2753  case LPFC_PG_TYPE_INVALID:
2754  default:
2755  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2756  "9022 Unexpected protection group %i\n",
2757  prot_group_type);
2758  return 1;
2759  }
2760  }
2761 
2762  /*
2763  * Finish initializing those IOCB fields that are dependent on the
2764  * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2765  * reinitialized since all iocb memory resources are used many times
2766  * for transmit, receive, and continuation bpl's.
2767  */
2768  iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2769  iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2770  iocb_cmd->ulpBdeCount = 1;
2771  iocb_cmd->ulpLe = 1;
2772 
2773  fcpdl = scsi_bufflen(scsi_cmnd);
2774 
2775  if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2776  /*
2777  * We are in DIF Type 1 mode
2778  * Every data block has an 8 byte DIF (trailer)
2779  * attached to it. Must adjust FCP data length
2780  */
2781  blksize = lpfc_cmd_blksize(scsi_cmnd);
2782  diflen = (fcpdl / blksize) * 8;
2783  fcpdl += diflen;
2784  }
2785  fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2786 
2787  /*
2788  * Due to difference in data length between DIF/non-DIF paths,
2789  * we need to set word 4 of IOCB here
2790  */
2791  iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2792 
2793  return 0;
2794 err:
2795  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2796  "9023 Could not setup all needed BDE's "
2797  "prot_group_type=%d, num_bde=%d\n",
2798  prot_group_type, num_bde);
2799  return 1;
2800 }
2801 
2802 /*
2803  * This function checks for BlockGuard errors detected by
2804  * the HBA. In case of errors, the ASC/ASCQ fields in the
2805  * sense buffer will be set accordingly, paired with
2806  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2807  * detected corruption.
2808  *
2809  * Returns:
2810  * 0 - No error found
2811  * 1 - BlockGuard error found
2812  * -1 - Internal error (bad profile, ...etc)
2813  */
2814 static int
2815 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2816  struct lpfc_iocbq *pIocbOut)
2817 {
2818  struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2819  struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2820  int ret = 0;
2821  uint32_t bghm = bgf->bghm;
2822  uint32_t bgstat = bgf->bgstat;
2823  uint64_t failing_sector = 0;
2824 
2825  lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2826  " 0x%x lba 0x%llx blk cnt 0x%x "
2827  "bgstat=0x%x bghm=0x%x\n",
2828  cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
2829  blk_rq_sectors(cmd->request), bgstat, bghm);
2830 
2831  spin_lock(&_dump_buf_lock);
2832  if (!_dump_buf_done) {
2833  lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
2834  " Data for %u blocks to debugfs\n",
2835  (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2836  lpfc_debug_save_data(phba, cmd);
2837 
2838  /* If we have a prot sgl, save the DIF buffer */
2839  if (lpfc_prot_group_type(phba, cmd) ==
2840  LPFC_PG_TYPE_DIF_BUF) {
2841  lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
2842  "Saving DIF for %u blocks to debugfs\n",
2843  (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2844  lpfc_debug_save_dif(phba, cmd);
2845  }
2846 
2847  _dump_buf_done = 1;
2848  }
2849  spin_unlock(&_dump_buf_lock);
2850 
2851  if (lpfc_bgs_get_invalid_prof(bgstat)) {
2852  cmd->result = ScsiResult(DID_ERROR, 0);
2853  lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
2854  " BlockGuard profile. bgstat:0x%x\n",
2855  bgstat);
2856  ret = (-1);
2857  goto out;
2858  }
2859 
2860  if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2861  cmd->result = ScsiResult(DID_ERROR, 0);
2862  lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
2863  "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
2864  bgstat);
2865  ret = (-1);
2866  goto out;
2867  }
2868 
2869  if (lpfc_bgs_get_guard_err(bgstat)) {
2870  ret = 1;
2871 
2872  scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2873  0x10, 0x1);
2874  cmd->result = DRIVER_SENSE << 24
2875  | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2876  phba->bg_guard_err_cnt++;
2877  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2878  "9055 BLKGRD: guard_tag error\n");
2879  }
2880 
2881  if (lpfc_bgs_get_reftag_err(bgstat)) {
2882  ret = 1;
2883 
2884  scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2885  0x10, 0x3);
2886  cmd->result = DRIVER_SENSE << 24
2887  | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2888 
2889  phba->bg_reftag_err_cnt++;
2890  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2891  "9056 BLKGRD: ref_tag error\n");
2892  }
2893 
2894  if (lpfc_bgs_get_apptag_err(bgstat)) {
2895  ret = 1;
2896 
2897  scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2898  0x10, 0x2);
2899  cmd->result = DRIVER_SENSE << 24
2900  | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2901 
2902  phba->bg_apptag_err_cnt++;
2903  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2904  "9061 BLKGRD: app_tag error\n");
2905  }
2906 
2907  if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2908  /*
2909  * setup sense data descriptor 0 per SPC-4 as an information
2910  * field, and put the failing LBA in it.
2911  * This code assumes there was also a guard/app/ref tag error
2912  * indication.
2913  */
2914  cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2915  cmd->sense_buffer[8] = 0; /* Information descriptor type */
2916  cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
2917  cmd->sense_buffer[10] = 0x80; /* Validity bit */
2918 
2919  /* bghm is an "on the wire" FC frame based count */
2920  switch (scsi_get_prot_op(cmd)) {
2921  case SCSI_PROT_READ_INSERT:
2922  case SCSI_PROT_WRITE_STRIP:
2923  bghm /= cmd->device->sector_size;
2924  break;
2925  case SCSI_PROT_READ_STRIP:
2926  case SCSI_PROT_WRITE_INSERT:
2927  case SCSI_PROT_READ_PASS:
2928  case SCSI_PROT_WRITE_PASS:
2929  bghm /= (cmd->device->sector_size +
2930  sizeof(struct scsi_dif_tuple));
2931  break;
2932  }
2933 
2934  failing_sector = scsi_get_lba(cmd);
2935  failing_sector += bghm;
2936 
2937  /* Descriptor Information */
2938  put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2939  }
2940 
2941  if (!ret) {
2942  /* No error was reported - problem in FW? */
2943  cmd->result = ScsiResult(DID_ERROR, 0);
2944  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2945  "9057 BLKGRD: Unknown error reported!\n");
2946  }
2947 
2948 out:
2949  return ret;
2950 }
2951 
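 /*
  * lpfc_scsi_prep_dma_buf_s4 - DMA-map a normal (non-BlockGuard) command for
  * SLI-4 and format one data SGE per mapped segment behind the fcp_cmnd and
  * fcp_rsp entries, filling the performance-hint BDE when it is enabled.
  * Returns 0 on success, 1 if the mapping fails or exceeds cfg_sg_seg_cnt.
  */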
2964 static int
2965 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2966 {
2967  struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2968  struct scatterlist *sgel = NULL;
2969  struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2970  struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
2971  struct sli4_sge *first_data_sgl;
2972  IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2973  dma_addr_t physaddr;
2974  uint32_t num_bde = 0;
2975  uint32_t dma_len;
2976  uint32_t dma_offset = 0;
2977  int nseg;
2978  struct ulp_bde64 *bde;
2979 
2980  /*
2981  * There are three possibilities here - use scatter-gather segment, use
2982  * the single mapping, or neither. Start the lpfc command prep by
2983  * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2984  * data bde entry.
2985  */
2986  if (scsi_sg_count(scsi_cmnd)) {
2987  /*
2988  * The driver stores the segment count returned from pci_map_sg
2989  * because this is a count of dma-mappings used to map the use_sg
2990  * pages. They are not guaranteed to be the same for those
2991  * architectures that implement an IOMMU.
2992  */
2993 
2994  nseg = scsi_dma_map(scsi_cmnd);
2995  if (unlikely(!nseg))
2996  return 1;
2997  sgl += 1;
2998  /* clear the last flag in the fcp_rsp map entry */
2999  sgl->word2 = le32_to_cpu(sgl->word2);
3000  bf_set(lpfc_sli4_sge_last, sgl, 0);
3001  sgl->word2 = cpu_to_le32(sgl->word2);
3002  sgl += 1;
3003  first_data_sgl = sgl;
3004  lpfc_cmd->seg_cnt = nseg;
3005  if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3006  lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3007  " %s: Too many sg segments from "
3008  "dma_map_sg. Config %d, seg_cnt %d\n",
3009  __func__, phba->cfg_sg_seg_cnt,
3010  lpfc_cmd->seg_cnt);
3011  scsi_dma_unmap(scsi_cmnd);
3012  return 1;
3013  }
3014 
3015  /*
3016  * The driver established a maximum scatter-gather segment count
3017  * during probe that limits the number of sg elements in any
3018  * single scsi command. Just run through the seg_cnt and format
3019  * the sge's.
3020  * When using SLI-3 the driver will try to fit all the BDEs into
3021  * the IOCB. If it can't then the BDEs get added to a BPL as it
3022  * does for SLI-2 mode.
3023  */
3024  scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3025  physaddr = sg_dma_address(sgel);
3026  dma_len = sg_dma_len(sgel);
3027  sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3028  sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3029  sgl->word2 = le32_to_cpu(sgl->word2);
3030  if ((num_bde + 1) == nseg)
3031  bf_set(lpfc_sli4_sge_last, sgl, 1);
3032  else
3033  bf_set(lpfc_sli4_sge_last, sgl, 0);
3034  bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3035  bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3036  sgl->word2 = cpu_to_le32(sgl->word2);
3037  sgl->sge_len = cpu_to_le32(dma_len);
3038  dma_offset += dma_len;
3039  sgl++;
3040  }
3041  /* setup the performance hint (first data BDE) if enabled */
3042  if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3043  bde = (struct ulp_bde64 *)
3044  &(iocb_cmd->unsli3.sli3Words[5]);
3045  bde->addrLow = first_data_sgl->addr_lo;
3046  bde->addrHigh = first_data_sgl->addr_hi;
3047  bde->tus.f.bdeSize =
3048  le32_to_cpu(first_data_sgl->sge_len);
3049  bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3050  bde->tus.w = cpu_to_le32(bde->tus.w);
3051  }
3052  } else {
3053  sgl += 1;
3054  /* clear the last flag in the fcp_rsp map entry */
3055  sgl->word2 = le32_to_cpu(sgl->word2);
3056  bf_set(lpfc_sli4_sge_last, sgl, 1);
3057  sgl->word2 = cpu_to_le32(sgl->word2);
3058  }
3059 
3060  /*
3061  * Finish initializing those IOCB fields that are dependent on the
3062  * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3063  * explicitly reinitialized since all iocb memory
3064  * resources are reused.
3065  */
3066  fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3067 
3068  /*
3069  * Due to difference in data length between DIF/non-DIF paths,
3070  * we need to set word 4 of IOCB here
3071  */
3072  iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3073  return 0;
3074 }
3075 
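 /*
  * lpfc_bg_scsi_adjust_dl - Compute the wire-level FCP data length for a
  * BlockGuard command. Whenever protection data travels on the wire, every
  * block contributes an extra 8-byte DIF tuple: for example, a 32768-byte
  * transfer with 512-byte blocks becomes 32768 + (64 * 8) = 33280 bytes.
  */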
3086 static int
3087 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3088  struct lpfc_scsi_buf *lpfc_cmd)
3089 {
3090  struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3091  int diflen, fcpdl;
3092  unsigned blksize;
3093 
3094  fcpdl = scsi_bufflen(sc);
3095 
3096  /* Check if there is protection data on the wire */
3097  if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3098  /* Read */
3099  if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
3100  return fcpdl;
3101 
3102  } else {
3103  /* Write */
3104  if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
3105  return fcpdl;
3106  }
3107 
3108  /* If protection data on the wire, adjust the count accordingly */
3109  blksize = lpfc_cmd_blksize(sc);
3110  diflen = (fcpdl / blksize) * 8;
3111  fcpdl += diflen;
3112  return fcpdl;
3113 }
3114 
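 /*
  * lpfc_bg_scsi_prep_dma_buf_s4 - SLI-4 counterpart of the SLI-3 routine
  * above: DMA-map the data (and protection) scatterlists, build the SGL via
  * lpfc_bg_setup_sgl or lpfc_bg_setup_sgl_prot, set the adjusted fcpDl and
  * mark the IOCB with LPFC_IO_DIF. Returns 0 on success, 1 on failure.
  */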
3124 static int
3125 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3126  struct lpfc_scsi_buf *lpfc_cmd)
3127 {
3128  struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3129  struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3130  struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3131  IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3132  uint32_t num_bde = 0;
3133  int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3134  int prot_group_type = 0;
3135  int fcpdl;
3136 
3137  /*
3138  * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3139  * fcp_rsp regions to the first data bde entry
3140  */
3141  if (scsi_sg_count(scsi_cmnd)) {
3142  /*
3143  * The driver stores the segment count returned from pci_map_sg
3144  * because this is a count of dma-mappings used to map the use_sg
3145  * pages. They are not guaranteed to be the same for those
3146  * architectures that implement an IOMMU.
3147  */
3148  datasegcnt = dma_map_sg(&phba->pcidev->dev,
3149  scsi_sglist(scsi_cmnd),
3150  scsi_sg_count(scsi_cmnd), datadir);
3151  if (unlikely(!datasegcnt))
3152  return 1;
3153 
3154  sgl += 1;
3155  /* clear the last flag in the fcp_rsp map entry */
3156  sgl->word2 = le32_to_cpu(sgl->word2);
3157  bf_set(lpfc_sli4_sge_last, sgl, 0);
3158  sgl->word2 = cpu_to_le32(sgl->word2);
3159 
3160  sgl += 1;
3161  lpfc_cmd->seg_cnt = datasegcnt;
3162  if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3163  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3164  "9087 BLKGRD: %s: Too many sg segments"
3165  " from dma_map_sg. Config %d, seg_cnt"
3166  " %d\n",
3167  __func__, phba->cfg_sg_seg_cnt,
3168  lpfc_cmd->seg_cnt);
3169  scsi_dma_unmap(scsi_cmnd);
3170  return 1;
3171  }
3172 
3173  prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3174 
3175  switch (prot_group_type) {
3176  case LPFC_PG_TYPE_NO_DIF:
3177  num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3178  datasegcnt);
3179  /* we should have 2 or more entries in buffer list */
3180  if (num_bde < 2)
3181  goto err;
3182  break;
3183  case LPFC_PG_TYPE_DIF_BUF:{
3184  /*
3185  * This type indicates that protection buffers are
3186  * passed to the driver, so that needs to be prepared
3187  * for DMA
3188  */
3189  protsegcnt = dma_map_sg(&phba->pcidev->dev,
3190  scsi_prot_sglist(scsi_cmnd),
3191  scsi_prot_sg_count(scsi_cmnd), datadir);
3192  if (unlikely(!protsegcnt)) {
3193  scsi_dma_unmap(scsi_cmnd);
3194  return 1;
3195  }
3196 
3197  lpfc_cmd->prot_seg_cnt = protsegcnt;
3198  if (lpfc_cmd->prot_seg_cnt
3199  > phba->cfg_prot_sg_seg_cnt) {
3200  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3201  "9088 BLKGRD: %s: Too many prot sg "
3202  "segments from dma_map_sg. Config %d,"
3203  "prot_seg_cnt %d\n", __func__,
3204  phba->cfg_prot_sg_seg_cnt,
3205  lpfc_cmd->prot_seg_cnt);
3206  dma_unmap_sg(&phba->pcidev->dev,
3207  scsi_prot_sglist(scsi_cmnd),
3208  scsi_prot_sg_count(scsi_cmnd),
3209  datadir);
3210  scsi_dma_unmap(scsi_cmnd);
3211  return 1;
3212  }
3213 
3214  num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3215  datasegcnt, protsegcnt);
3216  /* we should have 3 or more entries in buffer list */
3217  if (num_bde < 3)
3218  goto err;
3219  break;
3220  }
3221  case LPFC_PG_TYPE_INVALID:
3222  default:
3223  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3224  "9083 Unexpected protection group %i\n",
3225  prot_group_type);
3226  return 1;
3227  }
3228  }
3229 
3230  fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3231 
3232  fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3233 
3234  /*
3235  * Due to difference in data length between DIF/non-DIF paths,
3236  * we need to set word 4 of IOCB here
3237  */
3238  iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3239  lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
3240 
3241  return 0;
3242 err:
3243  lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3244  "9084 Could not setup all needed BDE's "
3245  "prot_group_type=%d, num_bde=%d\n",
3246  prot_group_type, num_bde);
3247  return 1;
3248 }
3249 
3262 static inline int
3263 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3264 {
3265  return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3266 }
3267 
3281 static inline int
3282 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3283 {
3284  return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3285 }
3286 
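 /*
  * lpfc_send_scsi_error_event - Queue a fast-path event for the worker
  * thread when a command completes with queue-full or busy status, with a
  * check condition on READ_10/WRITE_10, or with a read check error detected
  * by comparing fcpi_parm against the reported residual.
  */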
3297 static void
3298 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3299  struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3300  struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3301  struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3302  uint32_t resp_info = fcprsp->rspStatus2;
3303  uint32_t scsi_status = fcprsp->rspStatus3;
3304  uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3305  struct lpfc_fast_path_event *fast_path_evt = NULL;
3306  struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3307  unsigned long flags;
3308 
3309  if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3310  return;
3311 
3312  /* If there is queuefull or busy condition send a scsi event */
3313  if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3314  (cmnd->result == SAM_STAT_BUSY)) {
3315  fast_path_evt = lpfc_alloc_fast_evt(phba);
3316  if (!fast_path_evt)
3317  return;
3318  fast_path_evt->un.scsi_evt.event_type =
3320  fast_path_evt->un.scsi_evt.subcategory =
3321  (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3323  fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3324  memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3325  &pnode->nlp_portname, sizeof(struct lpfc_name));
3326  memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3327  &pnode->nlp_nodename, sizeof(struct lpfc_name));
3328  } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3329  ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3330  fast_path_evt = lpfc_alloc_fast_evt(phba);
3331  if (!fast_path_evt)
3332  return;
3333  fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3335  fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3337  fast_path_evt->un.check_cond_evt.scsi_event.lun =
3338  cmnd->device->lun;
3339  memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3340  &pnode->nlp_portname, sizeof(struct lpfc_name));
3341  memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3342  &pnode->nlp_nodename, sizeof(struct lpfc_name));
3343  fast_path_evt->un.check_cond_evt.sense_key =
3344  cmnd->sense_buffer[2] & 0xf;
3345  fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3346  fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3347  } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3348  fcpi_parm &&
3349  ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3350  ((scsi_status == SAM_STAT_GOOD) &&
3351  !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3352  /*
3353  * If status is good or resid does not match with fcp_param and
3354  * there is valid fcpi_parm, then there is a read_check error
3355  */
3356  fast_path_evt = lpfc_alloc_fast_evt(phba);
3357  if (!fast_path_evt)
3358  return;
3359  fast_path_evt->un.read_check_error.header.event_type =
3361  fast_path_evt->un.read_check_error.header.subcategory =
3363  memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3364  &pnode->nlp_portname, sizeof(struct lpfc_name));
3365  memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3366  &pnode->nlp_nodename, sizeof(struct lpfc_name));
3367  fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3368  fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3369  fast_path_evt->un.read_check_error.fcpiparam =
3370  fcpi_parm;
3371  } else
3372  return;
3373 
3374  fast_path_evt->vport = vport;
3375  spin_lock_irqsave(&phba->hbalock, flags);
3376  list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3377  spin_unlock_irqrestore(&phba->hbalock, flags);
3378  lpfc_worker_wake_up(phba);
3379  return;
3380 }
3381 
3390 static void
3391 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3392 {
3393  /*
3394  * There are only two special cases to consider. (1) the scsi command
3395  * requested scatter-gather usage or (2) the scsi command allocated
3396  * a request buffer, but did not request use_sg. There is a third
3397  * case, but it does not require resource deallocation.
3398  */
3399  if (psb->seg_cnt > 0)
3400  scsi_dma_unmap(psb->pCmd);
3401  if (psb->prot_seg_cnt > 0)
3402  dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3403  scsi_prot_sg_count(psb->pCmd),
3404  psb->pCmd->sc_data_direction);
3405 }
3406 
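 /*
  * lpfc_handle_fcp_err - Translate an FCP_RSP failure into a midlayer
  * result: copy any sense data, honour RESID_UNDER/RESID_OVER, and force
  * DID_ERROR when the HBA transfer count (fcpi_parm) shows a dropped frame
  * that the reported residual does not account for.
  */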
3417 static void
3418 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3419  struct lpfc_iocbq *rsp_iocb)
3420 {
3421  struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3422  struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3423  struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3424  uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3425  uint32_t resp_info = fcprsp->rspStatus2;
3426  uint32_t scsi_status = fcprsp->rspStatus3;
3427  uint32_t *lp;
3428  uint32_t host_status = DID_OK;
3429  uint32_t rsplen = 0;
3430  uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3431 
3432 
3433  /*
3434  * If this is a task management command, there is no
3435  * scsi packet associated with this lpfc_cmd. The driver
3436  * consumes it.
3437  */
3438  if (fcpcmd->fcpCntl2) {
3439  scsi_status = 0;
3440  goto out;
3441  }
3442 
3443  if (resp_info & RSP_LEN_VALID) {
3444  rsplen = be32_to_cpu(fcprsp->rspRspLen);
3445  if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3447  "2719 Invalid response length: "
3448  "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3449  cmnd->device->id,
3450  cmnd->device->lun, cmnd->cmnd[0],
3451  rsplen);
3452  host_status = DID_ERROR;
3453  goto out;
3454  }
3455  if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3457  "2757 Protocol failure detected during "
3458  "processing of FCP I/O op: "
3459  "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3460  cmnd->device->id,
3461  cmnd->device->lun, cmnd->cmnd[0],
3462  fcprsp->rspInfo3);
3463  host_status = DID_ERROR;
3464  goto out;
3465  }
3466  }
3467 
3468  if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3469  uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3470  if (snslen > SCSI_SENSE_BUFFERSIZE)
3471  snslen = SCSI_SENSE_BUFFERSIZE;
3472 
3473  if (resp_info & RSP_LEN_VALID)
3474  rsplen = be32_to_cpu(fcprsp->rspRspLen);
3475  memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3476  }
3477  lp = (uint32_t *)cmnd->sense_buffer;
3478 
3479  /* special handling for under run conditions */
3480  if (!scsi_status && (resp_info & RESID_UNDER)) {
3481  /* don't log under runs if fcp set... */
3482  if (vport->cfg_log_verbose & LOG_FCP)
3483  logit = LOG_FCP_ERROR;
3484  /* unless operator says so */
3485  if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3486  logit = LOG_FCP_UNDER;
3487  }
3488 
3489  lpfc_printf_vlog(vport, KERN_WARNING, logit,
3490  "9024 FCP command x%x failed: x%x SNS x%x x%x "
3491  "Data: x%x x%x x%x x%x x%x\n",
3492  cmnd->cmnd[0], scsi_status,
3493  be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3494  be32_to_cpu(fcprsp->rspResId),
3495  be32_to_cpu(fcprsp->rspSnsLen),
3496  be32_to_cpu(fcprsp->rspRspLen),
3497  fcprsp->rspInfo3);
3498 
3499  scsi_set_resid(cmnd, 0);
3500  if (resp_info & RESID_UNDER) {
3501  scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3502 
3504  "9025 FCP Read Underrun, expected %d, "
3505  "residual %d Data: x%x x%x x%x\n",
3506  be32_to_cpu(fcpcmd->fcpDl),
3507  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3508  cmnd->underflow);
3509 
3510  /*
3511  * If there is an under run check if under run reported by
3512  * storage array is same as the under run reported by HBA.
3513  * If this is not same, there is a dropped frame.
3514  */
3515  if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3516  fcpi_parm &&
3517  (scsi_get_resid(cmnd) != fcpi_parm)) {
3520  "9026 FCP Read Check Error "
3521  "and Underrun Data: x%x x%x x%x x%x\n",
3522  be32_to_cpu(fcpcmd->fcpDl),
3523  scsi_get_resid(cmnd), fcpi_parm,
3524  cmnd->cmnd[0]);
3525  scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3526  host_status = DID_ERROR;
3527  }
3528  /*
3529  * The cmnd->underflow is the minimum number of bytes that must
3530  * be transferred for this command. Provided a sense condition
3531  * is not present, make sure the actual amount transferred is at
3532  * least the underflow value or fail.
3533  */
3534  if (!(resp_info & SNS_LEN_VALID) &&
3535  (scsi_status == SAM_STAT_GOOD) &&
3536  (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3537  < cmnd->underflow)) {
3539  "9027 FCP command x%x residual "
3540  "underrun converted to error "
3541  "Data: x%x x%x x%x\n",
3542  cmnd->cmnd[0], scsi_bufflen(cmnd),
3543  scsi_get_resid(cmnd), cmnd->underflow);
3544  host_status = DID_ERROR;
3545  }
3546  } else if (resp_info & RESID_OVER) {
3548  "9028 FCP command x%x residual overrun error. "
3549  "Data: x%x x%x\n", cmnd->cmnd[0],
3550  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3551  host_status = DID_ERROR;
3552 
3553  /*
3554  * Check SLI validation that all the transfer was actually done
3555  * (fcpi_parm should be zero).
3556  */
3557  } else if (fcpi_parm) {
3559  "9029 FCP Data Transfer Check Error: "
3560  "x%x x%x x%x x%x x%x\n",
3561  be32_to_cpu(fcpcmd->fcpDl),
3562  be32_to_cpu(fcprsp->rspResId),
3563  fcpi_parm, cmnd->cmnd[0], scsi_status);
3564  switch (scsi_status) {
3565  case SAM_STAT_GOOD:
3566  case SAM_STAT_CHECK_CONDITION:
3567  /* Fabric dropped a data frame. Fail any successful
3568  * command in which we detected dropped frames.
3569  * A status of good or some check conditions could
3570  * be considered a successful command.
3571  */
3572  host_status = DID_ERROR;
3573  break;
3574  }
3575  scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3576  }
3577 
3578  out:
3579  cmnd->result = ScsiResult(host_status, scsi_status);
3580  lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3581 }
3582 
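 /*
  * lpfc_scsi_cmd_iocb_cmpl - Completion handler for FCP command IOCBs: maps
  * IOSTAT/IOERR values to SCSI results, parses BlockGuard errors, trims or
  * ramps the LUN queue depth as needed, unmaps DMA resources, calls
  * scsi_done() and releases the lpfc_scsi_buf.
  */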
3593 static void
3594 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3595  struct lpfc_iocbq *pIocbOut)
3596 {
3597  struct lpfc_scsi_buf *lpfc_cmd =
3598  (struct lpfc_scsi_buf *) pIocbIn->context1;
3599  struct lpfc_vport *vport = pIocbIn->vport;
3600  struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3601  struct lpfc_nodelist *pnode = rdata->pnode;
3602  struct scsi_cmnd *cmd;
3603  int result;
3604  struct scsi_device *tmp_sdev;
3605  int depth;
3606  unsigned long flags;
3607  struct lpfc_fast_path_event *fast_path_evt;
3608  struct Scsi_Host *shost;
3609  uint32_t queue_depth, scsi_id;
3610  uint32_t logit = LOG_FCP;
3611 
3612  /* Sanity check on return of outstanding command */
3613  if (!(lpfc_cmd->pCmd))
3614  return;
3615  cmd = lpfc_cmd->pCmd;
3616  shost = cmd->device->host;
3617 
3618  lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3619  lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3620  /* pick up SLI4 exchange busy status from HBA */
3621  lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3622 
3623 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3624  if (lpfc_cmd->prot_data_type) {
3625  struct scsi_dif_tuple *src = NULL;
3626 
3627  src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3628  /*
3629  * Used to restore any changes to protection
3630  * data for error injection.
3631  */
3632  switch (lpfc_cmd->prot_data_type) {
3633  case LPFC_INJERR_REFTAG:
3634  src->ref_tag =
3635  lpfc_cmd->prot_data;
3636  break;
3637  case LPFC_INJERR_APPTAG:
3638  src->app_tag =
3639  (uint16_t)lpfc_cmd->prot_data;
3640  break;
3641  case LPFC_INJERR_GUARD:
3642  src->guard_tag =
3643  (uint16_t)lpfc_cmd->prot_data;
3644  break;
3645  default:
3646  break;
3647  }
3648 
3649  lpfc_cmd->prot_data = 0;
3650  lpfc_cmd->prot_data_type = 0;
3651  lpfc_cmd->prot_data_segment = NULL;
3652  }
3653 #endif
3654  if (pnode && NLP_CHK_NODE_ACT(pnode))
3655  atomic_dec(&pnode->cmd_pending);
3656 
3657  if (lpfc_cmd->status) {
3658  if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3659  (lpfc_cmd->result & IOERR_DRVR_MASK))
3660  lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3661  else if (lpfc_cmd->status >= IOSTAT_CNT)
3662  lpfc_cmd->status = IOSTAT_DEFAULT;
3663  if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3664  !lpfc_cmd->fcp_rsp->rspStatus3 &&
3665  (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3666  !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3667  logit = 0;
3668  else
3669  logit = LOG_FCP | LOG_FCP_UNDER;
3670  lpfc_printf_vlog(vport, KERN_WARNING, logit,
3671  "9030 FCP cmd x%x failed <%d/%d> "
3672  "status: x%x result: x%x "
3673  "sid: x%x did: x%x oxid: x%x "
3674  "Data: x%x x%x\n",
3675  cmd->cmnd[0],
3676  cmd->device ? cmd->device->id : 0xffff,
3677  cmd->device ? cmd->device->lun : 0xffff,
3678  lpfc_cmd->status, lpfc_cmd->result,
3679  vport->fc_myDID, pnode->nlp_DID,
3680  phba->sli_rev == LPFC_SLI_REV4 ?
3681  lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3682  pIocbOut->iocb.ulpContext,
3683  lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3684 
3685  switch (lpfc_cmd->status) {
3686  case IOSTAT_FCP_RSP_ERROR:
3687  /* Call FCP RSP handler to determine result */
3688  lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3689  break;
3690  case IOSTAT_NPORT_BSY:
3691  case IOSTAT_FABRIC_BSY:
3693  fast_path_evt = lpfc_alloc_fast_evt(phba);
3694  if (!fast_path_evt)
3695  break;
3696  fast_path_evt->un.fabric_evt.event_type =
3698  fast_path_evt->un.fabric_evt.subcategory =
3699  (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3701  if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3702  memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3703  &pnode->nlp_portname,
3704  sizeof(struct lpfc_name));
3705  memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3706  &pnode->nlp_nodename,
3707  sizeof(struct lpfc_name));
3708  }
3709  fast_path_evt->vport = vport;
3710  fast_path_evt->work_evt.evt =
3712  spin_lock_irqsave(&phba->hbalock, flags);
3713  list_add_tail(&fast_path_evt->work_evt.evt_listp,
3714  &phba->work_list);
3715  spin_unlock_irqrestore(&phba->hbalock, flags);
3716  lpfc_worker_wake_up(phba);
3717  break;
3718  case IOSTAT_LOCAL_REJECT:
3719  case IOSTAT_REMOTE_STOP:
3720  if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3721  lpfc_cmd->result ==
3722  IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3723  lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3724  lpfc_cmd->result ==
3725  IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3726  cmd->result = ScsiResult(DID_NO_CONNECT, 0);
3727  break;
3728  }
3729  if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3730  lpfc_cmd->result == IOERR_NO_RESOURCES ||
3731  lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3732  lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3733  cmd->result = ScsiResult(DID_REQUEUE, 0);
3734  break;
3735  }
3736  if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3737  lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3738  pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3739  if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3740  /*
3741  * This is a response for a BG enabled
3742  * cmd. Parse BG error
3743  */
3744  lpfc_parse_bg_err(phba, lpfc_cmd,
3745  pIocbOut);
3746  break;
3747  } else {
3748  lpfc_printf_vlog(vport, KERN_WARNING,
3749  LOG_BG,
3750  "9031 non-zero BGSTAT "
3751  "on unprotected cmd\n");
3752  }
3753  }
3754  if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3755  && (phba->sli_rev == LPFC_SLI_REV4)
3756  && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3757  /* This IO was aborted by the target, we don't
3758  * know the rxid and because we did not send the
3759  * ABTS we cannot generate an RRQ.
3760  */
3761  lpfc_set_rrq_active(phba, pnode,
3762  lpfc_cmd->cur_iocbq.sli4_lxritag,
3763  0, 0);
3764  }
3765  /* else: fall through */
3766  default:
3767  cmd->result = ScsiResult(DID_ERROR, 0);
3768  break;
3769  }
3770 
3771  if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3772  || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3773  cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
3774  SAM_STAT_BUSY);
3775  } else
3776  cmd->result = ScsiResult(DID_OK, 0);
3777 
3778  if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3779  uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3780 
3781  lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3782  "0710 Iodone <%d/%d> cmd %p, error "
3783  "x%x SNS x%x x%x Data: x%x x%x\n",
3784  cmd->device->id, cmd->device->lun, cmd,
3785  cmd->result, *lp, *(lp + 3), cmd->retries,
3786  scsi_get_resid(cmd));
3787  }
3788 
3789  lpfc_update_stats(phba, lpfc_cmd);
3790  result = cmd->result;
3791  if (vport->cfg_max_scsicmpl_time &&
3792  time_after(jiffies, lpfc_cmd->start_time +
3793  msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
3794  spin_lock_irqsave(shost->host_lock, flags);
3795  if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3796  if (pnode->cmd_qdepth >
3797  atomic_read(&pnode->cmd_pending) &&
3798  (atomic_read(&pnode->cmd_pending) >
3800  ((cmd->cmnd[0] == READ_10) ||
3801  (cmd->cmnd[0] == WRITE_10)))
3802  pnode->cmd_qdepth =
3803  atomic_read(&pnode->cmd_pending);
3804 
3805  pnode->last_change_time = jiffies;
3806  }
3807  spin_unlock_irqrestore(shost->host_lock, flags);
3808  } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3809  if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
3810  time_after(jiffies, pnode->last_change_time +
3812  spin_lock_irqsave(shost->host_lock, flags);
3814  / 100;
3815  depth = depth ? depth : 1;
3816  pnode->cmd_qdepth += depth;
3817  if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
3818  pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
3819  pnode->last_change_time = jiffies;
3820  spin_unlock_irqrestore(shost->host_lock, flags);
3821  }
3822  }
3823 
3824  lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
3825 
3826  /* The sdev is not guaranteed to be valid post scsi_done upcall. */
3827  queue_depth = cmd->device->queue_depth;
3828  scsi_id = cmd->device->id;
3829  cmd->scsi_done(cmd);
3830 
3831  if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3832  spin_lock_irqsave(&phba->hbalock, flags);
3833  lpfc_cmd->pCmd = NULL;
3834  spin_unlock_irqrestore(&phba->hbalock, flags);
3835 
3836  /*
3837  * If there is a thread waiting for command completion
3838  * wake up the thread.
3839  */
3840  spin_lock_irqsave(shost->host_lock, flags);
3841  if (lpfc_cmd->waitq)
3842  wake_up(lpfc_cmd->waitq);
3843  spin_unlock_irqrestore(shost->host_lock, flags);
3844  lpfc_release_scsi_buf(phba, lpfc_cmd);
3845  return;
3846  }
3847 
3848  if (!result)
3849  lpfc_rampup_queue_depth(vport, queue_depth);
3850 
3851  /*
3852  * Check for queue full. If the lun is reporting queue full, then
3853  * back off the lun queue depth to prevent target overloads.
3854  */
3855  if (result == SAM_STAT_TASK_SET_FULL && pnode &&
3856  NLP_CHK_NODE_ACT(pnode)) {
3857  shost_for_each_device(tmp_sdev, shost) {
3858  if (tmp_sdev->id != scsi_id)
3859  continue;
3860  depth = scsi_track_queue_full(tmp_sdev,
3861  tmp_sdev->queue_depth-1);
3862  if (depth <= 0)
3863  continue;
3865  "0711 detected queue full - lun queue "
3866  "depth adjusted to %d.\n", depth);
3867  lpfc_send_sdev_queuedepth_change_event(phba, vport,
3868  pnode,
3869  tmp_sdev->lun,
3870  depth+1, depth);
3871  }
3872  }
3873 
3874  spin_lock_irqsave(&phba->hbalock, flags);
3875  lpfc_cmd->pCmd = NULL;
3876  spin_unlock_irqrestore(&phba->hbalock, flags);
3877 
3878  /*
3879  * If there is a thread waiting for command completion
3880  * wake up the thread.
3881  */
3882  spin_lock_irqsave(shost->host_lock, flags);
3883  if (lpfc_cmd->waitq)
3884  wake_up(lpfc_cmd->waitq);
3885  spin_unlock_irqrestore(shost->host_lock, flags);
3886 
3887  lpfc_release_scsi_buf(phba, lpfc_cmd);
3888 }
3889 
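 /*
  * lpfc_fcpcmd_to_iocb - Copy the fcp_cmnd payload into an IOCB data area
  * (such as the SLI-3 fcp_ext.icd region) one 32-bit word at a time,
  * converting each word to big-endian wire order.
  */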
3898 static void
3899 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3900 {
3901  int i, j;
3902  for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3903  i += sizeof(uint32_t), j++) {
3904  ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3905  }
3906 }
3907 
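 /*
  * lpfc_scsi_prep_cmnd - Fill in the FCP_CMND (LUN, CDB, task attributes,
  * read/write control bits) and the IOCB fields that do not depend on the
  * data buffer, then set the RPI context, FCP2 recovery bit and completion
  * routine for the command.
  */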
3917 static void
3918 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3919  struct lpfc_nodelist *pnode)
3920 {
3921  struct lpfc_hba *phba = vport->phba;
3922  struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3923  struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3924  IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3925  struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
3926  int datadir = scsi_cmnd->sc_data_direction;
3927  char tag[2];
3928  uint8_t *ptr;
3929  bool sli4;
3930 
3931  if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3932  return;
3933 
3934  lpfc_cmd->fcp_rsp->rspSnsLen = 0;
3935  /* clear task management bits */
3936  lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
3937 
3938  int_to_scsilun(lpfc_cmd->pCmd->device->lun,
3939  &lpfc_cmd->fcp_cmnd->fcp_lun);
3940 
3941  ptr = &fcp_cmnd->fcpCdb[0];
3942  memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3943  if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
3944  ptr += scsi_cmnd->cmd_len;
3945  memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
3946  }
3947 
3948  if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
3949  switch (tag[0]) {
3950  case HEAD_OF_QUEUE_TAG:
3951  fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
3952  break;
3953  case ORDERED_QUEUE_TAG:
3954  fcp_cmnd->fcpCntl1 = ORDERED_Q;
3955  break;
3956  default:
3957  fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3958  break;
3959  }
3960  } else
3961  fcp_cmnd->fcpCntl1 = 0;
3962 
3963  sli4 = (phba->sli_rev == LPFC_SLI_REV4);
3964 
3965  /*
3966  * There are three possibilities here - use scatter-gather segment, use
3967  * the single mapping, or neither. Start the lpfc command prep by
3968  * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3969  * data bde entry.
3970  */
3971  if (scsi_sg_count(scsi_cmnd)) {
3972  if (datadir == DMA_TO_DEVICE) {
3973  iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
3974  if (sli4)
3975  iocb_cmd->ulpPU = PARM_READ_CHECK;
3976  else {
3977  iocb_cmd->un.fcpi.fcpi_parm = 0;
3978  iocb_cmd->ulpPU = 0;
3979  }
3980  fcp_cmnd->fcpCntl3 = WRITE_DATA;
3981  phba->fc4OutputRequests++;
3982  } else {
3983  iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
3984  iocb_cmd->ulpPU = PARM_READ_CHECK;
3985  fcp_cmnd->fcpCntl3 = READ_DATA;
3986  phba->fc4InputRequests++;
3987  }
3988  } else {
3989  iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
3990  iocb_cmd->un.fcpi.fcpi_parm = 0;
3991  iocb_cmd->ulpPU = 0;
3992  fcp_cmnd->fcpCntl3 = 0;
3993  phba->fc4ControlRequests++;
3994  }
3995  if (phba->sli_rev == 3 &&
3996  !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
3997  lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
3998  /*
3999  * Finish initializing those IOCB fields that are independent
4000  * of the scsi_cmnd request_buffer
4001  */
4002  piocbq->iocb.ulpContext = pnode->nlp_rpi;
4003  if (sli4)
4004  piocbq->iocb.ulpContext =
4005  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4006  if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4007  piocbq->iocb.ulpFCP2Rcvy = 1;
4008  else
4009  piocbq->iocb.ulpFCP2Rcvy = 0;
4010 
4011  piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4012  piocbq->context1 = lpfc_cmd;
4013  piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4014  piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4015  piocbq->vport = vport;
4016 }
4017 
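 /*
  * lpfc_scsi_prep_task_mgmt_cmd - Build an FCP task management IOCB for the
  * given LUN and management function. Returns 1 when the IOCB was prepared,
  * 0 when the remote node is not in a usable (mapped) state.
  */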
4032 static int
4033 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4034  struct lpfc_scsi_buf *lpfc_cmd,
4035  unsigned int lun,
4036  uint8_t task_mgmt_cmd)
4037 {
4038  struct lpfc_iocbq *piocbq;
4039  IOCB_t *piocb;
4040  struct fcp_cmnd *fcp_cmnd;
4041  struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4042  struct lpfc_nodelist *ndlp = rdata->pnode;
4043 
4044  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4045  ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4046  return 0;
4047 
4048  piocbq = &(lpfc_cmd->cur_iocbq);
4049  piocbq->vport = vport;
4050 
4051  piocb = &piocbq->iocb;
4052 
4053  fcp_cmnd = lpfc_cmd->fcp_cmnd;
4054  /* Clear out any old data in the FCP command area */
4055  memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4056  int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4057  fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4058  if (vport->phba->sli_rev == 3 &&
4059  !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4060  lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4061  piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4062  piocb->ulpContext = ndlp->nlp_rpi;
4063  if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4064  piocb->ulpContext =
4065  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4066  }
4067  if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4068  piocb->ulpFCP2Rcvy = 1;
4069  }
4070  piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4071 
4072  /* ulpTimeout is only one byte */
4073  if (lpfc_cmd->timeout > 0xff) {
4074  /*
4075  * Do not timeout the command at the firmware level.
4076  * The driver will provide the timeout mechanism.
4077  */
4078  piocb->ulpTimeout = 0;
4079  } else
4080  piocb->ulpTimeout = lpfc_cmd->timeout;
4081 
4082  if (vport->phba->sli_rev == LPFC_SLI_REV4)
4083  lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4084 
4085  return 1;
4086 }
4087 
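 /*
  * lpfc_scsi_api_table_setup - Install the SLI-3 or SLI-4 SCSI routines for
  * this HBA, chosen by PCI device group, as phba function pointers.
  * Returns 0 on success or -ENODEV for an unrecognized group.
  */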
4097 int
4098 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4099 {
4100 
4101  phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4102  phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4103 
4104  switch (dev_grp) {
4105  case LPFC_PCI_DEV_LP:
4106  phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4107  phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4108  phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4109  phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4110  phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4111  break;
4112  case LPFC_PCI_DEV_OC:
4113  phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4114  phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4115  phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4116  phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4117  phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4118  break;
4119  default:
4120  lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4121  "1418 Invalid HBA PCI-device group: 0x%x\n",
4122  dev_grp);
4123  return -ENODEV;
4124  break;
4125  }
4127  phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4128  return 0;
4129 }
4130 
4140 static void
4141 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4142  struct lpfc_iocbq *cmdiocbq,
4143  struct lpfc_iocbq *rspiocbq)
4144 {
4145  struct lpfc_scsi_buf *lpfc_cmd =
4146  (struct lpfc_scsi_buf *) cmdiocbq->context1;
4147  if (lpfc_cmd)
4148  lpfc_release_scsi_buf(phba, lpfc_cmd);
4149  return;
4150 }
4151 
4161 const char *
4162 lpfc_info(struct Scsi_Host *host)
4163 {
4164  struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4165  struct lpfc_hba *phba = vport->phba;
4166  int len, link_speed = 0;
4167  static char lpfcinfobuf[384];
4168 
4169  memset(lpfcinfobuf,0,384);
4170  if (phba && phba->pcidev){
4171  strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4172  len = strlen(lpfcinfobuf);
4173  snprintf(lpfcinfobuf + len,
4174  384-len,
4175  " on PCI bus %02x device %02x irq %d",
4176  phba->pcidev->bus->number,
4177  phba->pcidev->devfn,
4178  phba->pcidev->irq);
4179  len = strlen(lpfcinfobuf);
4180  if (phba->Port[0]) {
4181  snprintf(lpfcinfobuf + len,
4182  384-len,
4183  " port %s",
4184  phba->Port);
4185  }
4186  len = strlen(lpfcinfobuf);
4187  if (phba->sli_rev <= LPFC_SLI_REV3) {
4189  } else {
4190  if (phba->sli4_hba.link_state.logical_speed)
4191  link_speed =
4192  phba->sli4_hba.link_state.logical_speed;
4193  else
4194  link_speed = phba->sli4_hba.link_state.speed;
4195  }
4196  if (link_speed != 0)
4197  snprintf(lpfcinfobuf + len, 384-len,
4198  " Logical Link Speed: %d Mbps", link_speed);
4199  }
4200  return lpfcinfobuf;
4201 }
4202 
4210 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4211 {
4212  unsigned long poll_tmo_expires =
4213  (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4214 
4215  if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
4216  mod_timer(&phba->fcp_poll_timer,
4217  poll_tmo_expires);
4218 }
4219 
4226 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4227 {
4228  lpfc_poll_rearm_timer(phba);
4229 }
4230 
4239 void lpfc_poll_timeout(unsigned long ptr)
4240 {
4241  struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4242 
4243  if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4244  lpfc_sli_handle_fast_ring_event(phba,
4245  &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4246 
4247  if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4248  lpfc_poll_rearm_timer(phba);
4249  }
4250 }
4251 
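 /*
  * lpfc_queuecommand - scsi_host_template queuecommand entry point: checks
  * the rport and node state, rejects protected commands when BlockGuard is
  * not enabled, obtains an lpfc_scsi_buf, prepares the DMA mappings and
  * IOCB, and issues it on the FCP ring, returning SCSI_MLQUEUE_* busy codes
  * when resources are exhausted.
  */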
4265 static int
4266 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4267 {
4268  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4269  struct lpfc_hba *phba = vport->phba;
4270  struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4271  struct lpfc_nodelist *ndlp;
4272  struct lpfc_scsi_buf *lpfc_cmd;
4273  struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4274  int err;
4275 
4276  err = fc_remote_port_chkready(rport);
4277  if (err) {
4278  cmnd->result = err;
4279  goto out_fail_command;
4280  }
4281  ndlp = rdata->pnode;
4282 
4283  if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4284  (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4285 
4286  lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4287  "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4288  " op:%02x str=%s without registering for"
4289  " BlockGuard - Rejecting command\n",
4290  cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4291  dif_op_str[scsi_get_prot_op(cmnd)]);
4292  goto out_fail_command;
4293  }
4294 
4295  /*
4296  * Catch race where our node has transitioned, but the
4297  * transport is still transitioning.
4298  */
4299  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4300  goto out_tgt_busy;
4301  if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4302  goto out_tgt_busy;
4303 
4304  lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4305  if (lpfc_cmd == NULL) {
4307 
4309  "0707 driver's buffer pool is empty, "
4310  "IO busied\n");
4311  goto out_host_busy;
4312  }
4313 
4314  /*
4315  * Store the midlayer's command structure for the completion phase
4316  * and complete the command initialization.
4317  */
4318  lpfc_cmd->pCmd = cmnd;
4319  lpfc_cmd->rdata = rdata;
4320  lpfc_cmd->timeout = 0;
4321  lpfc_cmd->start_time = jiffies;
4322  cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4323 
4324  if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4325  if (vport->phba->cfg_enable_bg) {
4327  "9033 BLKGRD: rcvd %s cmd:x%x "
4328  "sector x%llx cnt %u pt %x\n",
4329  dif_op_str[scsi_get_prot_op(cmnd)],
4330  cmnd->cmnd[0],
4331  (unsigned long long)scsi_get_lba(cmnd),
4332  blk_rq_sectors(cmnd->request),
4333  (cmnd->cmnd[1]>>5));
4334  }
4335  err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4336  } else {
4337  if (vport->phba->cfg_enable_bg) {
4338   lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4339  "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4340  "x%x sector x%llx cnt %u pt %x\n",
4341  cmnd->cmnd[0],
4342  (unsigned long long)scsi_get_lba(cmnd),
4343  blk_rq_sectors(cmnd->request),
4344  (cmnd->cmnd[1]>>5));
4345  }
4346  err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4347  }
4348 
4349  if (err)
4350  goto out_host_busy_free_buf;
4351 
4352  lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4353 
4354  atomic_inc(&ndlp->cmd_pending);
4355  err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4356  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4357  if (err) {
4358  atomic_dec(&ndlp->cmd_pending);
4359  goto out_host_busy_free_buf;
4360  }
4361  if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4362   lpfc_sli_handle_fast_ring_event(phba,
4363  &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4364 
4365  if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4366  lpfc_poll_rearm_timer(phba);
4367  }
4368 
4369  return 0;
4370 
4371  out_host_busy_free_buf:
4372  lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4373  lpfc_release_scsi_buf(phba, lpfc_cmd);
4374  out_host_busy:
4375  return SCSI_MLQUEUE_HOST_BUSY;
4376 
4377  out_tgt_busy:
4378  return SCSI_MLQUEUE_TARGET_BUSY;
4379 
4380  out_fail_command:
4381  cmnd->scsi_done(cmnd);
4382  return 0;
4383 }
4384 
4385 
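/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to the scsi_cmnd to be aborted.
 *
 * Issues an ABORT_XRI_CN IOCB (or CLOSE_XRI_CN when the link is down) for
 * the outstanding command and waits up to twice the devloss timeout for
 * the abort to complete.
 *
 * Return value:
 *   SUCCESS - the command already completed or was successfully aborted
 *   FAILED  - the abort could not be issued or did not complete in time
 **/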
4396 static int
4397 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4398 {
4399  struct Scsi_Host *shost = cmnd->device->host;
4400  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4401  struct lpfc_hba *phba = vport->phba;
4402  struct lpfc_iocbq *iocb;
4403  struct lpfc_iocbq *abtsiocb;
4404  struct lpfc_scsi_buf *lpfc_cmd;
4405  IOCB_t *cmd, *icmd;
4406  int ret = SUCCESS, status = 0;
4407  unsigned long flags;
4408  DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4409 
4410  status = fc_block_scsi_eh(cmnd);
4411  if (status != 0 && status != SUCCESS)
4412  return status;
4413 
4414  spin_lock_irqsave(&phba->hbalock, flags);
4415  /* driver queued commands are in the process of being flushed */
4416  if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4417  spin_unlock_irqrestore(&phba->hbalock, flags);
4418   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4419  "3168 SCSI Layer abort requested I/O has been "
4420  "flushed by LLD.\n");
4421  return FAILED;
4422  }
4423 
4424  lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4425  if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4426  spin_unlock_irqrestore(&phba->hbalock, flags);
4427   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4428  "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4429  "x%x ID %d LUN %d\n",
4430  SUCCESS, cmnd->device->id, cmnd->device->lun);
4431  return SUCCESS;
4432  }
4433 
4434  iocb = &lpfc_cmd->cur_iocbq;
4435  /* the command is in the process of being cancelled */
4436  if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4437  spin_unlock_irqrestore(&phba->hbalock, flags);
4438   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4439  "3169 SCSI Layer abort requested I/O has been "
4440  "cancelled by LLD.\n");
4441  return FAILED;
4442  }
4443  /*
4444  * If pCmd field of the corresponding lpfc_scsi_buf structure
4445  * points to a different SCSI command, then the driver has
4446  * already completed this command, but the midlayer did not
4447  * see the completion before the eh fired. Just return SUCCESS.
4448  */
4449  if (lpfc_cmd->pCmd != cmnd) {
4450   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4451  "3170 SCSI Layer abort requested I/O has been "
4452  "completed by LLD.\n");
4453  goto out_unlock;
4454  }
4455 
4456  BUG_ON(iocb->context1 != lpfc_cmd);
4457 
4458  abtsiocb = __lpfc_sli_get_iocbq(phba);
4459  if (abtsiocb == NULL) {
4460  ret = FAILED;
4461  goto out_unlock;
4462  }
4463 
4464  /*
4465  * The SCSI command cannot be in the txq and is in flight because the
4466  * pCmd is still pointing at the SCSI command we have to abort. There
4467  * is no need to search the txcmplq. Just send an abort to the FW.
4468  */
4469 
4470  cmd = &iocb->iocb;
4471  icmd = &abtsiocb->iocb;
4472  icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4473  icmd->un.acxri.abortContextTag = cmd->ulpContext;
4474  if (phba->sli_rev == LPFC_SLI_REV4)
4475  icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4476  else
4477  icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4478 
4479  icmd->ulpLe = 1;
4480  icmd->ulpClass = cmd->ulpClass;
4481 
4482  /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4483  abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4484  abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4485 
4486  if (lpfc_is_link_up(phba))
4487  icmd->ulpCommand = CMD_ABORT_XRI_CN;
4488  else
4489  icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4490 
4491  abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4492  abtsiocb->vport = vport;
4493  /* no longer need the lock after this point */
4494  spin_unlock_irqrestore(&phba->hbalock, flags);
4495 
4496  if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4497  IOCB_ERROR) {
4498  lpfc_sli_release_iocbq(phba, abtsiocb);
4499  ret = FAILED;
4500  goto out;
4501  }
4502 
4503  if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4504   lpfc_sli_handle_fast_ring_event(phba,
4505  &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4506 
4507  lpfc_cmd->waitq = &waitq;
4508  /* Wait for abort to complete */
4509  wait_event_timeout(waitq,
4510  (lpfc_cmd->pCmd != cmnd),
4511  (2*vport->cfg_devloss_tmo*HZ));
4512  lpfc_cmd->waitq = NULL;
4513 
4514  if (lpfc_cmd->pCmd == cmnd) {
4515  ret = FAILED;
4516   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4517  "0748 abort handler timed out waiting "
4518  "for aborting I/O (xri:x%x) to complete: "
4519  "ret %#x, ID %d, LUN %d\n",
4520  iocb->sli4_xritag, ret,
4521  cmnd->device->id, cmnd->device->lun);
4522  }
4523  goto out;
4524 
4525 out_unlock:
4526  spin_unlock_irqrestore(&phba->hbalock, flags);
4527 out:
4528   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4529  "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4530  "LUN %d\n", ret, cmnd->device->id,
4531  cmnd->device->lun);
4532  return ret;
4533 }
4534 
4535 static char *
4536 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4537 {
4538  switch (task_mgmt_cmd) {
4539  case FCP_ABORT_TASK_SET:
4540  return "ABORT_TASK_SET";
4541  case FCP_CLEAR_TASK_SET:
4542  return "FCP_CLEAR_TASK_SET";
4543  case FCP_BUS_RESET:
4544  return "FCP_BUS_RESET";
4545  case FCP_LUN_RESET:
4546  return "FCP_LUN_RESET";
4547  case FCP_TARGET_RESET:
4548  return "FCP_TARGET_RESET";
4549  case FCP_CLEAR_ACA:
4550  return "FCP_CLEAR_ACA";
4551  case FCP_TERMINATE_TASK:
4552  return "FCP_TERMINATE_TASK";
4553  default:
4554  return "unknown";
4555  }
4556 }
4557 
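/**
 * lpfc_send_taskmgmt - Build and issue an FCP task management IOCB
 * @vport: The virtual port the command is issued on.
 * @rdata: Remote port data of the target node.
 * @tgt_id: SCSI target id of the destination.
 * @lun_id: LUN the task management command applies to.
 * @task_mgmt_cmd: FCP task management code, e.g. FCP_LUN_RESET.
 *
 * Allocates a driver SCSI buffer, builds the task management IOCB and
 * issues it synchronously with a 60 second timeout.
 *
 * Return value:
 *   SUCCESS, FAILED or TIMEOUT_ERROR
 **/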
4573 static int
4574 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4575  unsigned tgt_id, unsigned int lun_id,
4576  uint8_t task_mgmt_cmd)
4577 {
4578  struct lpfc_hba *phba = vport->phba;
4579  struct lpfc_scsi_buf *lpfc_cmd;
4580  struct lpfc_iocbq *iocbq;
4581  struct lpfc_iocbq *iocbqrsp;
4582  struct lpfc_nodelist *pnode = rdata->pnode;
4583  int ret;
4584  int status;
4585 
4586  if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4587  return FAILED;
4588 
4589  lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
4590  if (lpfc_cmd == NULL)
4591  return FAILED;
4592  lpfc_cmd->timeout = 60;
4593  lpfc_cmd->rdata = rdata;
4594 
4595  status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4596  task_mgmt_cmd);
4597  if (!status) {
4598  lpfc_release_scsi_buf(phba, lpfc_cmd);
4599  return FAILED;
4600  }
4601 
4602  iocbq = &lpfc_cmd->cur_iocbq;
4603  iocbqrsp = lpfc_sli_get_iocbq(phba);
4604  if (iocbqrsp == NULL) {
4605  lpfc_release_scsi_buf(phba, lpfc_cmd);
4606  return FAILED;
4607  }
4608 
4609  lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4610  "0702 Issue %s to TGT %d LUN %d "
4611  "rpi x%x nlp_flag x%x Data: x%x x%x\n",
4612  lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
4613  pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
4614  iocbq->iocb_flag);
4615 
4616  status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
4617  iocbq, iocbqrsp, lpfc_cmd->timeout);
4618  if (status != IOCB_SUCCESS) {
4619  if (status == IOCB_TIMEDOUT) {
4620  iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
4621  ret = TIMEOUT_ERROR;
4622  } else
4623  ret = FAILED;
4624  lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4625   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4626  "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
4627  "iocb_flag x%x\n",
4628  lpfc_taskmgmt_name(task_mgmt_cmd),
4629  tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
4630  iocbqrsp->iocb.un.ulpWord[4],
4631  iocbq->iocb_flag);
4632  } else if (status == IOCB_BUSY)
4633  ret = FAILED;
4634  else
4635  ret = SUCCESS;
4636 
4637  lpfc_sli_release_iocbq(phba, iocbqrsp);
4638 
4639  if (ret != TIMEOUT_ERROR)
4640  lpfc_release_scsi_buf(phba, lpfc_cmd);
4641 
4642  return ret;
4643 }
4644 
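/**
 * lpfc_chk_tgt_mapped - Wait for the target node to reach MAPPED state
 * @vport: The virtual port the command was received on.
 * @cmnd: Pointer to the scsi_cmnd identifying the target.
 *
 * Polls for up to twice the devloss timeout for the target node to be
 * rediscovered and reach NLP_STE_MAPPED_NODE.
 *
 * Return value:
 *   SUCCESS - the node is mapped
 *   FAILED  - the node went away or never reached the mapped state
 **/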
4657 static int
4658 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4659 {
4660  struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4661  struct lpfc_nodelist *pnode;
4662  unsigned long later;
4663 
4664  if (!rdata) {
4665   lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4666  "0797 Tgt Map rport failure: rdata x%p\n", rdata);
4667  return FAILED;
4668  }
4669  pnode = rdata->pnode;
4670  /*
4671  * If target is not in a MAPPED state, delay until
4672  * target is rediscovered or devloss timeout expires.
4673  */
4674  later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4675  while (time_after(later, jiffies)) {
4676  if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4677  return FAILED;
4678  if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4679  return SUCCESS;
4680   schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4681  rdata = cmnd->device->hostdata;
4682  if (!rdata)
4683  return FAILED;
4684  pnode = rdata->pnode;
4685  }
4686  if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4687  (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4688  return FAILED;
4689  return SUCCESS;
4690 }
4691 
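/**
 * lpfc_reset_flush_io_context - Abort and drain outstanding I/O for a context
 * @vport: The virtual port the I/O belongs to.
 * @tgt_id: SCSI target id of the context.
 * @lun_id: LUN of the context.
 * @context: LPFC_CTX_LUN, LPFC_CTX_TGT or LPFC_CTX_HOST.
 *
 * Aborts every outstanding IOCB that matches the context and waits up to
 * twice the devloss timeout for the count to drop to zero.
 *
 * Return value:
 *   SUCCESS - all matching I/O completed
 *   FAILED  - I/O remained outstanding after the wait
 **/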
4708 static int
4709 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
4710  uint64_t lun_id, lpfc_ctx_cmd context)
4711 {
4712  struct lpfc_hba *phba = vport->phba;
4713  unsigned long later;
4714  int cnt;
4715 
4716  cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
4717  if (cnt)
4718  lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
4719  tgt_id, lun_id, context);
4720  later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4721  while (time_after(later, jiffies) && cnt) {
4722   schedule_timeout_uninterruptible(msecs_to_jiffies(20));
4723  cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
4724  }
4725  if (cnt) {
4726   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4727  "0724 I/O flush failure for context %s : cnt x%x\n",
4728  ((context == LPFC_CTX_LUN) ? "LUN" :
4729  ((context == LPFC_CTX_TGT) ? "TGT" :
4730  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
4731  cnt);
4732  return FAILED;
4733  }
4734  return SUCCESS;
4735 }
4736 
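/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset_handler
 * @cmnd: Pointer to the scsi_cmnd identifying the LUN to be reset.
 *
 * Posts an LPFC_EVENT_LUNRESET vendor event, sends an FCP_LUN_RESET task
 * management command to the target and then flushes any I/O still
 * outstanding against the LUN.
 *
 * Return value:
 *   SUCCESS or FAILED
 **/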
4748 static int
4749 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4750 {
4751  struct Scsi_Host *shost = cmnd->device->host;
4752  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4753  struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4754  struct lpfc_nodelist *pnode;
4755  unsigned tgt_id = cmnd->device->id;
4756  unsigned int lun_id = cmnd->device->lun;
4757  struct lpfc_scsi_event_header scsi_event;
4758  int status, ret = SUCCESS;
4759 
4760  if (!rdata) {
4761   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4762  "0798 Device Reset rport failure: rdata x%p\n", rdata);
4763  return FAILED;
4764  }
4765  pnode = rdata->pnode;
4766  status = fc_block_scsi_eh(cmnd);
4767  if (status != 0 && status != SUCCESS)
4768  return status;
4769 
4770  status = lpfc_chk_tgt_mapped(vport, cmnd);
4771  if (status == FAILED) {
4772   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4773  "0721 Device Reset rport failure: rdata x%p\n", rdata);
4774  return FAILED;
4775  }
4776 
4777  scsi_event.event_type = FC_REG_SCSI_EVENT;
4778  scsi_event.subcategory = LPFC_EVENT_LUNRESET;
4779  scsi_event.lun = lun_id;
4780  memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4781  memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4782 
4783  fc_host_post_vendor_event(shost, fc_get_event_number(),
4784  sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4785 
4786  status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4787  FCP_LUN_RESET);
4788 
4789  lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4790  "0713 SCSI layer issued Device Reset (%d, %d) "
4791  "return x%x\n", tgt_id, lun_id, status);
4792 
4793  /*
4794  * We have to clean up the I/O, as it may be orphaned by the TMF
4795  * or, if the TMF failed, left in an indeterminate state.
4796  * So, continue on.
4797  * We will report success if all the I/O aborts successfully.
4798  */
4799  ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4800  LPFC_CTX_LUN);
4801  return ret;
4802 }
4803 
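/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset_handler
 * @cmnd: Pointer to the scsi_cmnd identifying the target to be reset.
 *
 * Posts an LPFC_EVENT_TGTRESET vendor event, sends an FCP_TARGET_RESET
 * task management command to the target and then flushes any I/O still
 * outstanding against the target.
 *
 * Return value:
 *   SUCCESS or FAILED
 **/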
4815 static int
4816 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4817 {
4818  struct Scsi_Host *shost = cmnd->device->host;
4819  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4820  struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4821  struct lpfc_nodelist *pnode;
4822  unsigned tgt_id = cmnd->device->id;
4823  unsigned int lun_id = cmnd->device->lun;
4824  struct lpfc_scsi_event_header scsi_event;
4825  int status, ret = SUCCESS;
4826 
4827  if (!rdata) {
4828   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4829  "0799 Target Reset rport failure: rdata x%p\n", rdata);
4830  return FAILED;
4831  }
4832  pnode = rdata->pnode;
4833  status = fc_block_scsi_eh(cmnd);
4834  if (status != 0 && status != SUCCESS)
4835  return status;
4836 
4837  status = lpfc_chk_tgt_mapped(vport, cmnd);
4838  if (status == FAILED) {
4839   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4840  "0722 Target Reset rport failure: rdata x%p\n", rdata);
4841  return FAILED;
4842  }
4843 
4844  scsi_event.event_type = FC_REG_SCSI_EVENT;
4845  scsi_event.subcategory = LPFC_EVENT_TGTRESET;
4846  scsi_event.lun = 0;
4847  memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4848  memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4849 
4850  fc_host_post_vendor_event(shost, fc_get_event_number(),
4851  sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4852 
4853  status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4854    FCP_TARGET_RESET);
4855 
4856  lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4857  "0723 SCSI layer issued Target Reset (%d, %d) "
4858  "return x%x\n", tgt_id, lun_id, status);
4859 
4860  /*
4861  * We have to clean up the I/O, as it may be orphaned by the TMF
4862  * or, if the TMF failed, left in an indeterminate state.
4863  * So, continue on.
4864  * We will report success if all the I/O aborts successfully.
4865  */
4866  ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4867  LPFC_CTX_TGT);
4868  return ret;
4869 }
4870 
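/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler
 * @cmnd: Pointer to the scsi_cmnd that triggered the reset.
 *
 * Issues FCP_TARGET_RESET to every mapped target known to the vport and
 * then flushes all outstanding I/O on the host context.
 *
 * Return value:
 *   SUCCESS - every target reset and the I/O flush succeeded
 *   FAILED  - any target reset or the flush failed
 **/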
4882 static int
4883 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
4884 {
4885  struct Scsi_Host *shost = cmnd->device->host;
4886  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4887  struct lpfc_nodelist *ndlp = NULL;
4888  struct lpfc_scsi_event_header scsi_event;
4889  int match;
4890  int ret = SUCCESS, status, i;
4891 
4892  scsi_event.event_type = FC_REG_SCSI_EVENT;
4893  scsi_event.subcategory = LPFC_EVENT_BUSRESET;
4894  scsi_event.lun = 0;
4895  memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
4896  memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
4897 
4898  fc_host_post_vendor_event(shost, fc_get_event_number(),
4899  sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4900 
4901  status = fc_block_scsi_eh(cmnd);
4902  if (status != 0 && status != SUCCESS)
4903  return status;
4904 
4905  /*
4906  * Since the driver manages a single bus device, reset all
4907  * targets known to the driver. Should any target reset
4908  * fail, this routine returns failure to the midlayer.
4909  */
4910  for (i = 0; i < LPFC_MAX_TARGET; i++) {
4911  /* Search for mapped node by target ID */
4912  match = 0;
4913  spin_lock_irq(shost->host_lock);
4914  list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4915  if (!NLP_CHK_NODE_ACT(ndlp))
4916  continue;
4917  if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
4918  ndlp->nlp_sid == i &&
4919  ndlp->rport) {
4920  match = 1;
4921  break;
4922  }
4923  }
4924  spin_unlock_irq(shost->host_lock);
4925  if (!match)
4926  continue;
4927 
4928  status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
4929  i, 0, FCP_TARGET_RESET);
4930 
4931  if (status != SUCCESS) {
4932   lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4933  "0700 Bus Reset on target %d failed\n",
4934  i);
4935  ret = FAILED;
4936  }
4937  }
4938  /*
4939  * We have to clean up the I/O, as it may be orphaned by the TMFs
4940  * above or, if any of the TMFs failed, left in an
4941  * indeterminate state.
4942  * We will report success if all the I/O aborts successfully.
4943  */
4944 
4945  status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
4946  if (status != SUCCESS)
4947  ret = FAILED;
4948 
4949  lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4950  "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
4951  return ret;
4952 }
4953 
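/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler
 * @cmnd: Pointer to the scsi_cmnd that triggered the reset.
 *
 * Takes the HBA offline, restarts the SLI bridge and brings the HBA back
 * online with management I/O unblocked.
 *
 * Return value:
 *   SUCCESS - the board restart succeeded
 *   FAILED  - the board restart failed
 **/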
4970 static int
4971 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
4972 {
4973  struct Scsi_Host *shost = cmnd->device->host;
4974  struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4975  struct lpfc_hba *phba = vport->phba;
4976  int rc, ret = SUCCESS;
4977 
4978  lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4979  lpfc_offline(phba);
4980  rc = lpfc_sli_brdrestart(phba);
4981  if (rc)
4982  ret = FAILED;
4983  lpfc_online(phba);
4984  lpfc_unblock_mgmt_io(phba);
4985 
4986  lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4987  "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
4988  return ret;
4989 }
4990 
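/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to the scsi_device being allocated.
 *
 * Stores the remote port data in sdev->hostdata and grows the global pool
 * of driver SCSI buffers as devices are added, without exceeding the HBA
 * queue depth.
 *
 * Return value:
 *   0 on success, -ENXIO if the remote port is not ready.
 **/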
5004 static int
5005 lpfc_slave_alloc(struct scsi_device *sdev)
5006 {
5007  struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5008  struct lpfc_hba *phba = vport->phba;
5009  struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5010  uint32_t total = 0;
5011  uint32_t num_to_alloc = 0;
5012  int num_allocated = 0;
5013  uint32_t sdev_cnt;
5014 
5015  if (!rport || fc_remote_port_chkready(rport))
5016  return -ENXIO;
5017 
5018  sdev->hostdata = rport->dd_data;
5019  sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5020 
5021  /*
5022  * Populate the cmds_per_lun count scsi_bufs into this host's globally
5023  * available list of scsi buffers. Don't allocate more than the
5024  * HBA limit conveyed to the midlayer via the host structure. The
5025  * formula accounts for the lun_queue_depth + error handlers + 1
5026  * extra. This list of scsi bufs exists for the lifetime of the driver.
5027  */
5028  total = phba->total_scsi_bufs;
5029  num_to_alloc = vport->cfg_lun_queue_depth + 2;
5030 
5031  /* If allocated buffers are enough do nothing */
5032  if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5033  return 0;
5034 
5035  /* Allow some exchanges to be available always to complete discovery */
5036  if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5037   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5038  "0704 At limitation of %d preallocated "
5039  "command buffers\n", total);
5040  return 0;
5041  /* Allow some exchanges to be available always to complete discovery */
5042  } else if (total + num_to_alloc >
5043  phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5044   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5045  "0705 Allocation request of %d "
5046  "command buffers will exceed max of %d. "
5047  "Reducing allocation request to %d.\n",
5048  num_to_alloc, phba->cfg_hba_queue_depth,
5049  (phba->cfg_hba_queue_depth - total));
5050  num_to_alloc = phba->cfg_hba_queue_depth - total;
5051  }
5052  num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5053  if (num_to_alloc != num_allocated) {
5054   lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5055  "0708 Allocation request of %d "
5056  "command buffers did not succeed. "
5057  "Allocated %d buffers.\n",
5058  num_to_alloc, num_allocated);
5059  }
5060  if (num_allocated > 0)
5061  phba->total_scsi_bufs += num_allocated;
5062  return 0;
5063 }
5064 
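/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to the scsi_device being configured.
 *
 * Sets the queue depth (tagged or untagged) to cfg_lun_queue_depth and,
 * when FCP ring polling is enabled, services the ring and re-arms the
 * polling timer.
 **/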
5076 static int
5077 lpfc_slave_configure(struct scsi_device *sdev)
5078 {
5079  struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5080  struct lpfc_hba *phba = vport->phba;
5081 
5082  if (sdev->tagged_supported)
5083  scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5084  else
5085  scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5086 
5087  if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5088   lpfc_sli_handle_fast_ring_event(phba,
5089  &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5090  if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5091  lpfc_poll_rearm_timer(phba);
5092  }
5093 
5094  return 0;
5095 }
5096 
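/**
 * lpfc_slave_destroy - scsi_host_template slave_destroy entry point
 * @sdev: Pointer to the scsi_device being destroyed.
 *
 * Decrements the per-HBA device count and clears sdev->hostdata.
 **/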
5103 static void
5104 lpfc_slave_destroy(struct scsi_device *sdev)
5105 {
5106  struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5107  struct lpfc_hba *phba = vport->phba;
5108  atomic_dec(&phba->sdev_cnt);
5109  sdev->hostdata = NULL;
5110  return;
5111 }
5112 
5113 
5114 struct scsi_host_template lpfc_template = {
5115  .module = THIS_MODULE,
5116  .name = LPFC_DRIVER_NAME,
5117  .info = lpfc_info,
5118  .queuecommand = lpfc_queuecommand,
5119  .eh_abort_handler = lpfc_abort_handler,
5120  .eh_device_reset_handler = lpfc_device_reset_handler,
5121  .eh_target_reset_handler = lpfc_target_reset_handler,
5122  .eh_bus_reset_handler = lpfc_bus_reset_handler,
5123  .eh_host_reset_handler = lpfc_host_reset_handler,
5124  .slave_alloc = lpfc_slave_alloc,
5125  .slave_configure = lpfc_slave_configure,
5126  .slave_destroy = lpfc_slave_destroy,
5127  .scan_finished = lpfc_scan_finished,
5128  .this_id = -1,
5129  .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
5130  .cmd_per_lun = LPFC_CMD_PER_LUN,
5131  .use_clustering = ENABLE_CLUSTERING,
5132  .shost_attrs = lpfc_hba_attrs,
5133  .max_sectors = 0xFFFF,
5134  .vendor_id = LPFC_NL_VENDOR_ID,
5135  .change_queue_depth = lpfc_change_queue_depth,
5136 };
5137 
5138 struct scsi_host_template lpfc_vport_template = {
5139  .module = THIS_MODULE,
5140  .name = LPFC_DRIVER_NAME,
5141  .info = lpfc_info,
5142  .queuecommand = lpfc_queuecommand,
5143  .eh_abort_handler = lpfc_abort_handler,
5144  .eh_device_reset_handler = lpfc_device_reset_handler,
5145  .eh_target_reset_handler = lpfc_target_reset_handler,
5146  .eh_bus_reset_handler = lpfc_bus_reset_handler,
5147  .slave_alloc = lpfc_slave_alloc,
5148  .slave_configure = lpfc_slave_configure,
5149  .slave_destroy = lpfc_slave_destroy,
5150  .scan_finished = lpfc_scan_finished,
5151  .this_id = -1,
5152  .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
5153  .cmd_per_lun = LPFC_CMD_PER_LUN,
5154  .use_clustering = ENABLE_CLUSTERING,
5155  .shost_attrs = lpfc_vport_attrs,
5156  .max_sectors = 0xFFFF,
5157  .change_queue_depth = lpfc_change_queue_depth,
5158 };