Linux Kernel 3.7.1: drivers/scsi/qla2xxx/qla_iocb.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c) 2003-2012 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12 
13 #include <scsi/scsi_tcq.h>
14 
15 static void qla25xx_set_que(srb_t *, struct rsp_que **);
22 static inline uint16_t
23 qla2x00_get_cmd_direction(srb_t *sp)
24 {
25  uint16_t cflags;
26  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27  struct scsi_qla_host *vha = sp->fcport->vha;
28 
29  cflags = 0;
30 
31  /* Set transfer direction */
32  if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33  cflags = CF_WRITE;
34  vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35  } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36  cflags = CF_READ;
37  vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38  }
39  return (cflags);
40 }
41 
50 uint16_t
51 qla2x00_calc_iocbs_32(uint16_t dsds)
52 {
53  uint16_t iocbs;
54 
55  iocbs = 1;
56  if (dsds > 3) {
57  iocbs += (dsds - 3) / 7;
58  if ((dsds - 3) % 7)
59  iocbs++;
60  }
61  return (iocbs);
62 }
63 
72 uint16_t
73 qla2x00_calc_iocbs_64(uint16_t dsds)
74 {
75  uint16_t iocbs;
76 
77  iocbs = 1;
78  if (dsds > 2) {
79  iocbs += (dsds - 2) / 5;
80  if ((dsds - 2) % 5)
81  iocbs++;
82  }
83  return (iocbs);
84 }
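/*
 * Illustrative note (added for this listing, not part of the driver source):
 * the two helpers above size a request in ring entries.  The command IOCB
 * itself carries 3 (32-bit DSDs) or 2 (64-bit DSDs) data segment descriptors,
 * and each continuation IOCB carries 7 or 5 more, respectively.  For example,
 * a 64-bit capable request with dsds = 10 needs:
 *
 *     iocbs = 1 + (10 - 2) / 5 = 2, plus 1 for the remainder of 3, so 3 total
 *
 * i.e. one Command Type 3 entry followed by two Continuation Type 1 entries.
 */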
85 
92 static inline cont_entry_t *
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
94 {
95  cont_entry_t *cont_pkt;
96  struct req_que *req = vha->req;
97  /* Adjust ring index. */
98  req->ring_index++;
99  if (req->ring_index == req->length) {
100  req->ring_index = 0;
101  req->ring_ptr = req->ring;
102  } else {
103  req->ring_ptr++;
104  }
105 
106  cont_pkt = (cont_entry_t *)req->ring_ptr;
107 
108  /* Load packet defaults. */
109  *((uint32_t *)(&cont_pkt->entry_type)) =
110      __constant_cpu_to_le32(CONTINUE_TYPE);
111 
112  return (cont_pkt);
113 }
114 
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124  cont_a64_entry_t *cont_pkt;
125 
126  /* Adjust ring index. */
127  req->ring_index++;
128  if (req->ring_index == req->length) {
129  req->ring_index = 0;
130  req->ring_ptr = req->ring;
131  } else {
132  req->ring_ptr++;
133  }
134 
135  cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136 
137  /* Load packet defaults. */
138  *((uint32_t *)(&cont_pkt->entry_type)) =
139      __constant_cpu_to_le32(CONTINUE_A64_TYPE);
140 
141  return (cont_pkt);
142 }
143 
144 static inline int
145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
146 {
147  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148  uint8_t guard = scsi_host_get_guard(cmd->device->host);
149 
150  /* We always use DIFF Bundling for best performance */
151  *fw_prot_opts = 0;
152 
153  /* Translate SCSI opcode to a protection opcode */
154  switch (scsi_get_prot_op(cmd)) {
155  case SCSI_PROT_READ_STRIP:
156  *fw_prot_opts |= PO_MODE_DIF_REMOVE;
157  break;
158  case SCSI_PROT_WRITE_INSERT:
159  *fw_prot_opts |= PO_MODE_DIF_INSERT;
160  break;
161  case SCSI_PROT_READ_INSERT:
162  *fw_prot_opts |= PO_MODE_DIF_INSERT;
163  break;
164  case SCSI_PROT_WRITE_STRIP:
165  *fw_prot_opts |= PO_MODE_DIF_REMOVE;
166  break;
167  case SCSI_PROT_READ_PASS:
168  case SCSI_PROT_WRITE_PASS:
169  if (guard & SHOST_DIX_GUARD_IP)
170  *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
171  else
172  *fw_prot_opts |= PO_MODE_DIF_PASS;
173  break;
174  default: /* Normal Request */
175  *fw_prot_opts |= PO_MODE_DIF_PASS;
176  break;
177  }
178 
179  return scsi_prot_sg_count(cmd);
180 }
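/*
 * Illustrative note (added for this listing): for a DIX-capable host doing a
 * SCSI_PROT_WRITE_PASS with an IP-checksum guard, the switch above selects
 * PO_MODE_DIF_TCP_CKSUM so the firmware converts the IP checksum to a T10 CRC
 * on the wire; for the READ_STRIP/WRITE_INSERT style operations the firmware
 * removes or inserts the protection data itself.  The return value is simply
 * the number of protection scatter/gather entries the caller must map.
 */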
181 
182 /*
183  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
184  * capable IOCB types.
185  *
186  * @sp: SRB command to process
187  * @cmd_pkt: Command type 2 IOCB
188  * @tot_dsds: Total number of segments to transfer
189  */
190 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
191  uint16_t tot_dsds)
192 {
193  uint16_t avail_dsds;
194  uint32_t *cur_dsd;
195  scsi_qla_host_t *vha;
196  struct scsi_cmnd *cmd;
197  struct scatterlist *sg;
198  int i;
199 
200  cmd = GET_CMD_SP(sp);
201 
202  /* Update entry type to indicate Command Type 2 IOCB */
203  *((uint32_t *)(&cmd_pkt->entry_type)) =
204      __constant_cpu_to_le32(COMMAND_TYPE);
205 
206  /* No data transfer */
207  if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
208  cmd_pkt->byte_count = __constant_cpu_to_le32(0);
209  return;
210  }
211 
212  vha = sp->fcport->vha;
213  cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
214 
215  /* Three DSDs are available in the Command Type 2 IOCB */
216  avail_dsds = 3;
217  cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
218 
219  /* Load data segments */
220  scsi_for_each_sg(cmd, sg, tot_dsds, i) {
221  cont_entry_t *cont_pkt;
222 
223  /* Allocate additional continuation packets? */
224  if (avail_dsds == 0) {
225  /*
226  * Seven DSDs are available in the Continuation
227  * Type 0 IOCB.
228  */
229  cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
230  cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
231  avail_dsds = 7;
232  }
233 
234  *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
235  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
236  avail_dsds--;
237  }
238 }
239 
248 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_a64_entry_t *cmd_pkt,
249  uint16_t tot_dsds)
250 {
251  uint16_t avail_dsds;
252  uint32_t *cur_dsd;
253  scsi_qla_host_t *vha;
254  struct scsi_cmnd *cmd;
255  struct scatterlist *sg;
256  int i;
257 
258  cmd = GET_CMD_SP(sp);
259 
260  /* Update entry type to indicate Command Type 3 IOCB */
261  *((uint32_t *)(&cmd_pkt->entry_type)) =
262      __constant_cpu_to_le32(COMMAND_A64_TYPE);
263 
264  /* No data transfer */
265  if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
266  cmd_pkt->byte_count = __constant_cpu_to_le32(0);
267  return;
268  }
269 
270  vha = sp->fcport->vha;
271  cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
272 
273  /* Two DSDs are available in the Command Type 3 IOCB */
274  avail_dsds = 2;
275  cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
276 
277  /* Load data segments */
278  scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279  dma_addr_t sle_dma;
280  cont_a64_entry_t *cont_pkt;
281 
282  /* Allocate additional continuation packets? */
283  if (avail_dsds == 0) {
284  /*
285  * Five DSDs are available in the Continuation
286  * Type 1 IOCB.
287  */
288  cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
289  cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290  avail_dsds = 5;
291  }
292 
293  sle_dma = sg_dma_address(sg);
294  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
295  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
296  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
297  avail_dsds--;
298  }
299 }
300 
307 int
308 qla2x00_start_scsi(srb_t *sp)
309 {
310  int ret, nseg;
311  unsigned long flags;
312  scsi_qla_host_t *vha;
313  struct scsi_cmnd *cmd;
314  uint32_t *clr_ptr;
315  uint32_t index;
316  uint32_t handle;
317  cmd_entry_t *cmd_pkt;
318  uint16_t cnt;
319  uint16_t req_cnt;
320  uint16_t tot_dsds;
321  struct device_reg_2xxx __iomem *reg;
322  struct qla_hw_data *ha;
323  struct req_que *req;
324  struct rsp_que *rsp;
325  char tag[2];
326 
327  /* Setup device pointers. */
328  ret = 0;
329  vha = sp->fcport->vha;
330  ha = vha->hw;
331  reg = &ha->iobase->isp;
332  cmd = GET_CMD_SP(sp);
333  req = ha->req_q_map[0];
334  rsp = ha->rsp_q_map[0];
335  /* So we know we haven't pci_map'ed anything yet */
336  tot_dsds = 0;
337 
338  /* Send marker if required */
339  if (vha->marker_needed != 0) {
340  if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
341  QLA_SUCCESS) {
342  return (QLA_FUNCTION_FAILED);
343  }
344  vha->marker_needed = 0;
345  }
346 
347  /* Acquire ring specific lock */
348  spin_lock_irqsave(&ha->hardware_lock, flags);
349 
350  /* Check for room in outstanding command list. */
351  handle = req->current_outstanding_cmd;
352  for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
353  handle++;
354  if (handle == MAX_OUTSTANDING_COMMANDS)
355  handle = 1;
356  if (!req->outstanding_cmds[handle])
357  break;
358  }
359  if (index == MAX_OUTSTANDING_COMMANDS)
360  goto queuing_error;
361 
362  /* Map the sg table so we have an accurate count of sg entries needed */
363  if (scsi_sg_count(cmd)) {
364  nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
365  scsi_sg_count(cmd), cmd->sc_data_direction);
366  if (unlikely(!nseg))
367  goto queuing_error;
368  } else
369  nseg = 0;
370 
371  tot_dsds = nseg;
372 
373  /* Calculate the number of request entries needed. */
374  req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
375  if (req->cnt < (req_cnt + 2)) {
376  cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
377  if (req->ring_index < cnt)
378  req->cnt = cnt - req->ring_index;
379  else
380  req->cnt = req->length -
381  (req->ring_index - cnt);
382  /* If still no head room then bail out */
383  if (req->cnt < (req_cnt + 2))
384  goto queuing_error;
385  }
386 
387  /* Build command packet */
388  req->current_outstanding_cmd = handle;
389  req->outstanding_cmds[handle] = sp;
390  sp->handle = handle;
391  cmd->host_scribble = (unsigned char *)(unsigned long)handle;
392  req->cnt -= req_cnt;
393 
394  cmd_pkt = (cmd_entry_t *)req->ring_ptr;
395  cmd_pkt->handle = handle;
396  /* Zero out remaining portion of packet. */
397  clr_ptr = (uint32_t *)cmd_pkt + 2;
398  memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
399  cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
400 
401  /* Set target ID and LUN number*/
402  SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
403  cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
404 
405  /* Update tagged queuing modifier */
406  if (scsi_populate_tag_msg(cmd, tag)) {
407  switch (tag[0]) {
408  case HEAD_OF_QUEUE_TAG:
409  cmd_pkt->control_flags =
410      __constant_cpu_to_le16(CF_HEAD_TAG);
411  break;
412  case ORDERED_QUEUE_TAG:
413  cmd_pkt->control_flags =
414      __constant_cpu_to_le16(CF_ORDERED_TAG);
415  break;
416  default:
417  cmd_pkt->control_flags =
418      __constant_cpu_to_le16(CF_SIMPLE_TAG);
419  break;
420  }
421  }
422 
423  /* Load SCSI command packet. */
424  memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
425  cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
426 
427  /* Build IOCB segments */
428  ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
429 
430  /* Set total data segment count. */
431  cmd_pkt->entry_count = (uint8_t)req_cnt;
432  wmb();
433 
434  /* Adjust ring index. */
435  req->ring_index++;
436  if (req->ring_index == req->length) {
437  req->ring_index = 0;
438  req->ring_ptr = req->ring;
439  } else
440  req->ring_ptr++;
441 
442  sp->flags |= SRB_DMA_VALID;
443 
444  /* Set chip new ring index. */
445  WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
446  RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
447 
448  /* Manage unprocessed RIO/ZIO commands in response queue. */
449  if (vha->flags.process_response_queue &&
450      rsp->ring_ptr->signature != RESPONSE_PROCESSED)
451  qla2x00_process_response_queue(rsp);
452 
453  spin_unlock_irqrestore(&ha->hardware_lock, flags);
454  return (QLA_SUCCESS);
455 
456 queuing_error:
457  if (tot_dsds)
458  scsi_dma_unmap(cmd);
459 
460  spin_unlock_irqrestore(&ha->hardware_lock, flags);
461 
462  return (QLA_FUNCTION_FAILED);
463 }
464 
468 void
469 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
470 {
471  struct qla_hw_data *ha = vha->hw;
472  device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
473 
474  if (IS_QLA82XX(ha)) {
475  qla82xx_start_iocbs(vha);
476  } else {
477  /* Adjust ring index. */
478  req->ring_index++;
479  if (req->ring_index == req->length) {
480  req->ring_index = 0;
481  req->ring_ptr = req->ring;
482  } else
483  req->ring_ptr++;
484 
485  /* Set chip new ring index. */
486  if (ha->mqenable || IS_QLA83XX(ha)) {
487  WRT_REG_DWORD(req->req_q_in, req->ring_index);
488  RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
489  } else if (IS_FWI2_CAPABLE(ha)) {
490  WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
491  RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
492  } else {
493  WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
494  req->ring_index);
495  RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
496  }
497  }
498 }
499 
511 static int
512 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
513  struct rsp_que *rsp, uint16_t loop_id,
514  uint16_t lun, uint8_t type)
515 {
516  mrk_entry_t *mrk;
517  struct mrk_entry_24xx *mrk24;
518  struct qla_hw_data *ha = vha->hw;
519  scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
520 
521  mrk24 = NULL;
522  req = ha->req_q_map[0];
523  mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
524  if (mrk == NULL) {
525  ql_log(ql_log_warn, base_vha, 0x3026,
526  "Failed to allocate Marker IOCB.\n");
527 
528  return (QLA_FUNCTION_FAILED);
529  }
530 
531  mrk->entry_type = MARKER_TYPE;
532  mrk->modifier = type;
533  if (type != MK_SYNC_ALL) {
534  if (IS_FWI2_CAPABLE(ha)) {
535  mrk24 = (struct mrk_entry_24xx *) mrk;
536  mrk24->nport_handle = cpu_to_le16(loop_id);
537  mrk24->lun[1] = LSB(lun);
538  mrk24->lun[2] = MSB(lun);
539  host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
540  mrk24->vp_index = vha->vp_idx;
541  mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
542  } else {
543  SET_TARGET_ID(ha, mrk->target, loop_id);
544  mrk->lun = cpu_to_le16(lun);
545  }
546  }
547  wmb();
548 
549  qla2x00_start_iocbs(vha, req);
550 
551  return (QLA_SUCCESS);
552 }
553 
554 int
555 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
556  struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
557  uint8_t type)
558 {
559  int ret;
560  unsigned long flags = 0;
561 
562  spin_lock_irqsave(&vha->hw->hardware_lock, flags);
563  ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
564  spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
565 
566  return (ret);
567 }
568 
569 /*
570  * qla2x00_issue_marker
571  *
572  * Issue marker
573  * Caller CAN have hardware lock held as specified by ha_locked parameter.
574  * Might release it, then reacquire.
575  */
576 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
577 {
578  if (ha_locked) {
579  if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
580      MK_SYNC_ALL) != QLA_SUCCESS)
581  return QLA_FUNCTION_FAILED;
582  } else {
583  if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
584      MK_SYNC_ALL) != QLA_SUCCESS)
585  return QLA_FUNCTION_FAILED;
586  }
587  vha->marker_needed = 0;
588 
589  return QLA_SUCCESS;
590 }
591 
600 inline uint16_t
601 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
602 {
603  uint16_t iocbs;
604 
605  iocbs = 1;
606  if (dsds > 1) {
607  iocbs += (dsds - 1) / 5;
608  if ((dsds - 1) % 5)
609  iocbs++;
610  }
611  return iocbs;
612 }
613 
614 static inline int
615 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
616  uint16_t tot_dsds)
617 {
618  uint32_t *cur_dsd = NULL;
619  scsi_qla_host_t *vha;
620  struct qla_hw_data *ha;
621  struct scsi_cmnd *cmd;
622  struct scatterlist *cur_seg;
623  uint32_t *dsd_seg;
624  void *next_dsd;
625  uint8_t avail_dsds;
626  uint8_t first_iocb = 1;
627  uint32_t dsd_list_len;
628  struct dsd_dma *dsd_ptr;
629  struct ct6_dsd *ctx;
630 
631  cmd = GET_CMD_SP(sp);
632 
633  /* Update entry type to indicate Command Type 3 IOCB */
634  *((uint32_t *)(&cmd_pkt->entry_type)) =
635      __constant_cpu_to_le32(COMMAND_TYPE_6);
636 
637  /* No data transfer */
638  if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
639  cmd_pkt->byte_count = __constant_cpu_to_le32(0);
640  return 0;
641  }
642 
643  vha = sp->fcport->vha;
644  ha = vha->hw;
645 
646  /* Set transfer direction */
647  if (cmd->sc_data_direction == DMA_TO_DEVICE) {
648  cmd_pkt->control_flags =
649      __constant_cpu_to_le16(CF_WRITE_DATA);
650  vha->qla_stats.output_bytes += scsi_bufflen(cmd);
651  } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
652  cmd_pkt->control_flags =
653      __constant_cpu_to_le16(CF_READ_DATA);
654  vha->qla_stats.input_bytes += scsi_bufflen(cmd);
655  }
656 
657  cur_seg = scsi_sglist(cmd);
658  ctx = GET_CMD_CTX_SP(sp);
659 
660  while (tot_dsds) {
661  avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
662  QLA_DSDS_PER_IOCB : tot_dsds;
663  tot_dsds -= avail_dsds;
664  dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
665 
666  dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
667  struct dsd_dma, list);
668  next_dsd = dsd_ptr->dsd_addr;
669  list_del(&dsd_ptr->list);
670  ha->gbl_dsd_avail--;
671  list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
672  ctx->dsd_use_cnt++;
673  ha->gbl_dsd_inuse++;
674 
675  if (first_iocb) {
676  first_iocb = 0;
677  dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
678  *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
679  *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
680  cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
681  } else {
682  *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
683  *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
684  *cur_dsd++ = cpu_to_le32(dsd_list_len);
685  }
686  cur_dsd = (uint32_t *)next_dsd;
687  while (avail_dsds) {
688  dma_addr_t sle_dma;
689 
690  sle_dma = sg_dma_address(cur_seg);
691  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
692  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
693  *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
694  cur_seg = sg_next(cur_seg);
695  avail_dsds--;
696  }
697  }
698 
699  /* Null termination */
700  *cur_dsd++ = 0;
701  *cur_dsd++ = 0;
702  *cur_dsd++ = 0;
703  cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
704  return 0;
705 }
706 
707 /*
708  * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
709  * for Command Type 6.
710  *
711  * @dsds: number of data segment descriptors needed
712  *
713  * Returns the number of DSD lists needed to store @dsds.
714  */
715 inline uint16_t
716 qla24xx_calc_dsd_lists(uint16_t dsds)
717 {
718  uint16_t dsd_lists = 0;
719 
720  dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
721  if (dsds % QLA_DSDS_PER_IOCB)
722  dsd_lists++;
723  return dsd_lists;
724 }
725 
726 
735 inline void
736 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
737  uint16_t tot_dsds)
738 {
739  uint16_t avail_dsds;
740  uint32_t *cur_dsd;
741  scsi_qla_host_t *vha;
742  struct scsi_cmnd *cmd;
743  struct scatterlist *sg;
744  int i;
745  struct req_que *req;
746 
747  cmd = GET_CMD_SP(sp);
748 
749  /* Update entry type to indicate Command Type 3 IOCB */
750  *((uint32_t *)(&cmd_pkt->entry_type)) =
751      __constant_cpu_to_le32(COMMAND_TYPE_7);
752 
753  /* No data transfer */
754  if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
755  cmd_pkt->byte_count = __constant_cpu_to_le32(0);
756  return;
757  }
758 
759  vha = sp->fcport->vha;
760  req = vha->req;
761 
762  /* Set transfer direction */
763  if (cmd->sc_data_direction == DMA_TO_DEVICE) {
764  cmd_pkt->task_mgmt_flags =
765      __constant_cpu_to_le16(TMF_WRITE_DATA);
766  vha->qla_stats.output_bytes += scsi_bufflen(cmd);
767  } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
768  cmd_pkt->task_mgmt_flags =
769      __constant_cpu_to_le16(TMF_READ_DATA);
770  vha->qla_stats.input_bytes += scsi_bufflen(cmd);
771  }
772 
773  /* One DSD is available in the Command Type 3 IOCB */
774  avail_dsds = 1;
775  cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
776 
777  /* Load data segments */
778 
779  scsi_for_each_sg(cmd, sg, tot_dsds, i) {
780  dma_addr_t sle_dma;
781  cont_a64_entry_t *cont_pkt;
782 
783  /* Allocate additional continuation packets? */
784  if (avail_dsds == 0) {
785  /*
786  * Five DSDs are available in the Continuation
787  * Type 1 IOCB.
788  */
789  cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
790  cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
791  avail_dsds = 5;
792  }
793 
794  sle_dma = sg_dma_address(sg);
795  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
796  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
797  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
798  avail_dsds--;
799  }
800 }
801 
802 struct fw_dif_context {
803  uint32_t ref_tag;
804  uint16_t app_tag;
805  uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
806  uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
807 };
808 
809 /*
810  * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
811  *
812  */
813 static inline void
814 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
815  unsigned int protcnt)
816 {
817  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
818 
819  switch (scsi_get_prot_type(cmd)) {
820  case SCSI_PROT_DIF_TYPE0:
821  /*
822  * No check for ql2xenablehba_err_chk, as it would be an
823  * I/O error if hba tag generation is not done.
824  */
825  pkt->ref_tag = cpu_to_le32((uint32_t)
826  (0xffffffff & scsi_get_lba(cmd)));
827 
828  if (!qla2x00_hba_err_chk_enabled(sp))
829  break;
830 
831  pkt->ref_tag_mask[0] = 0xff;
832  pkt->ref_tag_mask[1] = 0xff;
833  pkt->ref_tag_mask[2] = 0xff;
834  pkt->ref_tag_mask[3] = 0xff;
835  break;
836 
837  /*
838  * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
839  * match LBA in CDB + N
840  */
841  case SCSI_PROT_DIF_TYPE2:
842  pkt->app_tag = __constant_cpu_to_le16(0);
843  pkt->app_tag_mask[0] = 0x0;
844  pkt->app_tag_mask[1] = 0x0;
845 
846  pkt->ref_tag = cpu_to_le32((uint32_t)
847  (0xffffffff & scsi_get_lba(cmd)));
848 
849  if (!qla2x00_hba_err_chk_enabled(sp))
850  break;
851 
852  /* enable ALL bytes of the ref tag */
853  pkt->ref_tag_mask[0] = 0xff;
854  pkt->ref_tag_mask[1] = 0xff;
855  pkt->ref_tag_mask[2] = 0xff;
856  pkt->ref_tag_mask[3] = 0xff;
857  break;
858 
859  /* For Type 3 protection: 16 bit GUARD only */
860  case SCSI_PROT_DIF_TYPE3:
861  pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
862  pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
863  0x00;
864  break;
865 
866  /*
867  * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
868  * 16 bit app tag.
869  */
870  case SCSI_PROT_DIF_TYPE1:
871  pkt->ref_tag = cpu_to_le32((uint32_t)
872  (0xffffffff & scsi_get_lba(cmd)));
873  pkt->app_tag = __constant_cpu_to_le16(0);
874  pkt->app_tag_mask[0] = 0x0;
875  pkt->app_tag_mask[1] = 0x0;
876 
877  if (!qla2x00_hba_err_chk_enabled(sp))
878  break;
879 
880  /* enable ALL bytes of the ref tag */
881  pkt->ref_tag_mask[0] = 0xff;
882  pkt->ref_tag_mask[1] = 0xff;
883  pkt->ref_tag_mask[2] = 0xff;
884  pkt->ref_tag_mask[3] = 0xff;
885  break;
886  }
887 }
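/*
 * Illustrative note (added for this listing): for a Type 1 formatted device
 * and a command starting at LBA 0x12345678, the code above programs
 * ref_tag = 0x12345678 (the low 32 bits of the LBA), clears the application
 * tag, and, when HBA error checking is enabled, sets all four ref_tag_mask
 * bytes to 0xff so the firmware validates every byte of the reference tag.
 */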
888 
889 struct qla2_sgx {
890  dma_addr_t dma_addr; /* OUT */
891  uint32_t dma_len; /* OUT */
892 
893  uint32_t tot_bytes; /* IN */
894  struct scatterlist *cur_sg; /* IN */
895 
896  /* for book keeping, bzero on initial invocation */
897  uint32_t bytes_consumed;
898  uint32_t num_bytes;
899  uint32_t tot_partial;
900 
901  /* for debugging */
902  uint32_t num_sg;
903  srb_t *sp;
904 };
905 
906 static int
907 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
908  uint32_t *partial)
909 {
910  struct scatterlist *sg;
911  uint32_t cumulative_partial, sg_len;
912  dma_addr_t sg_dma_addr;
913 
914  if (sgx->num_bytes == sgx->tot_bytes)
915  return 0;
916 
917  sg = sgx->cur_sg;
918  cumulative_partial = sgx->tot_partial;
919 
920  sg_dma_addr = sg_dma_address(sg);
921  sg_len = sg_dma_len(sg);
922 
923  sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
924 
925  if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
926  sgx->dma_len = (blk_sz - cumulative_partial);
927  sgx->tot_partial = 0;
928  sgx->num_bytes += blk_sz;
929  *partial = 0;
930  } else {
931  sgx->dma_len = sg_len - sgx->bytes_consumed;
932  sgx->tot_partial += sgx->dma_len;
933  *partial = 1;
934  }
935 
936  sgx->bytes_consumed += sgx->dma_len;
937 
938  if (sg_len == sgx->bytes_consumed) {
939  sg = sg_next(sg);
940  sgx->num_sg++;
941  sgx->cur_sg = sg;
942  sgx->bytes_consumed = 0;
943  }
944 
945  return 1;
946 }
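/*
 * Illustrative note (added for this listing): qla24xx_get_one_block_sg()
 * slices the data scatter/gather list into protection-interval sized pieces.
 * With blk_sz = 512 and two SG elements of 768 and 256 bytes, successive
 * calls return a 512-byte slice (partial = 0), then the trailing 256 bytes of
 * the first element (partial = 1), then the 256 bytes of the second element
 * that complete the block (partial = 0), and finally 0 once tot_bytes has
 * been consumed.
 */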
947 
948 static int
949 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
950  uint32_t *dsd, uint16_t tot_dsds)
951 {
952  void *next_dsd;
953  uint8_t avail_dsds = 0;
954  uint32_t dsd_list_len;
955  struct dsd_dma *dsd_ptr;
956  struct scatterlist *sg_prot;
957  uint32_t *cur_dsd = dsd;
958  uint16_t used_dsds = tot_dsds;
959 
960  uint32_t prot_int;
961  uint32_t partial;
962  struct qla2_sgx sgx;
963  dma_addr_t sle_dma;
964  uint32_t sle_dma_len, tot_prot_dma_len = 0;
965  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
966 
967  prot_int = cmd->device->sector_size;
968 
969  memset(&sgx, 0, sizeof(struct qla2_sgx));
970  sgx.tot_bytes = scsi_bufflen(cmd);
971  sgx.cur_sg = scsi_sglist(cmd);
972  sgx.sp = sp;
973 
974  sg_prot = scsi_prot_sglist(cmd);
975 
976  while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
977 
978  sle_dma = sgx.dma_addr;
979  sle_dma_len = sgx.dma_len;
980 alloc_and_fill:
981  /* Allocate additional continuation packets? */
982  if (avail_dsds == 0) {
983  avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
984  QLA_DSDS_PER_IOCB : used_dsds;
985  dsd_list_len = (avail_dsds + 1) * 12;
986  used_dsds -= avail_dsds;
987 
988  /* allocate tracking DS */
989  dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
990  if (!dsd_ptr)
991  return 1;
992 
993  /* allocate new list */
994  dsd_ptr->dsd_addr = next_dsd =
995      dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
996      &dsd_ptr->dsd_list_dma);
997 
998  if (!next_dsd) {
999  /*
1000  * Need to cleanup only this dsd_ptr, rest
1001  * will be done by sp_free_dma()
1002  */
1003  kfree(dsd_ptr);
1004  return 1;
1005  }
1006 
1007  list_add_tail(&dsd_ptr->list,
1008  &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1009 
1010  sp->flags |= SRB_CRC_CTX_DSD_VALID;
1011 
1012  /* add new list to cmd iocb or last list */
1013  *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1014  *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1015  *cur_dsd++ = dsd_list_len;
1016  cur_dsd = (uint32_t *)next_dsd;
1017  }
1018  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1019  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1020  *cur_dsd++ = cpu_to_le32(sle_dma_len);
1021  avail_dsds--;
1022 
1023  if (partial == 0) {
1024  /* Got a full protection interval */
1025  sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1026  sle_dma_len = 8;
1027 
1028  tot_prot_dma_len += sle_dma_len;
1029  if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1030  tot_prot_dma_len = 0;
1031  sg_prot = sg_next(sg_prot);
1032  }
1033 
1034  partial = 1; /* So as to not re-enter this block */
1035  goto alloc_and_fill;
1036  }
1037  }
1038  /* Null termination */
1039  *cur_dsd++ = 0;
1040  *cur_dsd++ = 0;
1041  *cur_dsd++ = 0;
1042  return 0;
1043 }
1044 
1045 static int
1046 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1047  uint16_t tot_dsds)
1048 {
1049  void *next_dsd;
1050  uint8_t avail_dsds = 0;
1051  uint32_t dsd_list_len;
1052  struct dsd_dma *dsd_ptr;
1053  struct scatterlist *sg;
1054  uint32_t *cur_dsd = dsd;
1055  int i;
1056  uint16_t used_dsds = tot_dsds;
1057  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1058 
1059  scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1060  dma_addr_t sle_dma;
1061 
1062  /* Allocate additional continuation packets? */
1063  if (avail_dsds == 0) {
1064  avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1065  QLA_DSDS_PER_IOCB : used_dsds;
1066  dsd_list_len = (avail_dsds + 1) * 12;
1067  used_dsds -= avail_dsds;
1068 
1069  /* allocate tracking DS */
1070  dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1071  if (!dsd_ptr)
1072  return 1;
1073 
1074  /* allocate new list */
1075  dsd_ptr->dsd_addr = next_dsd =
1076      dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1077      &dsd_ptr->dsd_list_dma);
1078 
1079  if (!next_dsd) {
1080  /*
1081  * Need to cleanup only this dsd_ptr, rest
1082  * will be done by sp_free_dma()
1083  */
1084  kfree(dsd_ptr);
1085  return 1;
1086  }
1087 
1088  list_add_tail(&dsd_ptr->list,
1089  &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1090 
1091  sp->flags |= SRB_CRC_CTX_DSD_VALID;
1092 
1093  /* add new list to cmd iocb or last list */
1094  *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1095  *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1096  *cur_dsd++ = dsd_list_len;
1097  cur_dsd = (uint32_t *)next_dsd;
1098  }
1099  sle_dma = sg_dma_address(sg);
1100 
1101  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1102  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1103  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1104  avail_dsds--;
1105 
1106  }
1107  /* Null termination */
1108  *cur_dsd++ = 0;
1109  *cur_dsd++ = 0;
1110  *cur_dsd++ = 0;
1111  return 0;
1112 }
1113 
1114 static int
1115 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1116  uint32_t *dsd,
1117  uint16_t tot_dsds)
1118 {
1119  void *next_dsd;
1120  uint8_t avail_dsds = 0;
1121  uint32_t dsd_list_len;
1122  struct dsd_dma *dsd_ptr;
1123  struct scatterlist *sg;
1124  int i;
1125  struct scsi_cmnd *cmd;
1126  uint32_t *cur_dsd = dsd;
1127  uint16_t used_dsds = tot_dsds;
1128 
1129  cmd = GET_CMD_SP(sp);
1130  scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1131  dma_addr_t sle_dma;
1132 
1133  /* Allocate additional continuation packets? */
1134  if (avail_dsds == 0) {
1135  avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1136  QLA_DSDS_PER_IOCB : used_dsds;
1137  dsd_list_len = (avail_dsds + 1) * 12;
1138  used_dsds -= avail_dsds;
1139 
1140  /* allocate tracking DS */
1141  dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1142  if (!dsd_ptr)
1143  return 1;
1144 
1145  /* allocate new list */
1146  dsd_ptr->dsd_addr = next_dsd =
1147      dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1148      &dsd_ptr->dsd_list_dma);
1149 
1150  if (!next_dsd) {
1151  /*
1152  * Need to cleanup only this dsd_ptr, rest
1153  * will be done by sp_free_dma()
1154  */
1155  kfree(dsd_ptr);
1156  return 1;
1157  }
1158 
1159  list_add_tail(&dsd_ptr->list,
1160  &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1161 
1162  sp->flags |= SRB_CRC_CTX_DSD_VALID;
1163 
1164  /* add new list to cmd iocb or last list */
1165  *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1166  *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1167  *cur_dsd++ = dsd_list_len;
1168  cur_dsd = (uint32_t *)next_dsd;
1169  }
1170  sle_dma = sg_dma_address(sg);
1171 
1172  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1173  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1174  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1175 
1176  avail_dsds--;
1177  }
1178  /* Null termination */
1179  *cur_dsd++ = 0;
1180  *cur_dsd++ = 0;
1181  *cur_dsd++ = 0;
1182  return 0;
1183 }
1184 
1193 static inline int
1194 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1195  uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1196 {
1197  uint32_t *cur_dsd, *fcp_dl;
1198  scsi_qla_host_t *vha;
1199  struct scsi_cmnd *cmd;
1200  struct scatterlist *cur_seg;
1201  int sgc;
1202  uint32_t total_bytes = 0;
1203  uint32_t data_bytes;
1204  uint32_t dif_bytes;
1205  uint8_t bundling = 1;
1206  uint16_t blk_size;
1207  uint8_t *clr_ptr;
1208  struct crc_context *crc_ctx_pkt = NULL;
1209  struct qla_hw_data *ha;
1210  uint8_t additional_fcpcdb_len;
1211  uint16_t fcp_cmnd_len;
1212  struct fcp_cmnd *fcp_cmnd;
1213  dma_addr_t crc_ctx_dma;
1214  char tag[2];
1215 
1216  cmd = GET_CMD_SP(sp);
1217 
1218  sgc = 0;
1219  /* Update entry type to indicate Command Type CRC_2 IOCB */
1220  *((uint32_t *)(&cmd_pkt->entry_type)) =
1221      __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1222 
1223  vha = sp->fcport->vha;
1224  ha = vha->hw;
1225 
1226  /* No data transfer */
1227  data_bytes = scsi_bufflen(cmd);
1228  if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1229  cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1230  return QLA_SUCCESS;
1231  }
1232 
1233  cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1234 
1235  /* Set transfer direction */
1236  if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1237  cmd_pkt->control_flags =
1238      __constant_cpu_to_le16(CF_WRITE_DATA);
1239  } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1240  cmd_pkt->control_flags =
1241      __constant_cpu_to_le16(CF_READ_DATA);
1242  }
1243 
1244  if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1245  (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1246  (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1247  (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1248  bundling = 0;
1249 
1250  /* Allocate CRC context from global pool */
1251  crc_ctx_pkt = sp->u.scmd.ctx =
1252  dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1253 
1254  if (!crc_ctx_pkt)
1255  goto crc_queuing_error;
1256 
1257  /* Zero out CTX area. */
1258  clr_ptr = (uint8_t *)crc_ctx_pkt;
1259  memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1260 
1261  crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1262 
1263  sp->flags |= SRB_CRC_CTX_DMA_VALID;
1264 
1265  /* Set handle */
1266  crc_ctx_pkt->handle = cmd_pkt->handle;
1267 
1268  INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1269 
1270  qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1271  &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1272 
1273  cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1274  cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1275  cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1276 
1277  /* Determine SCSI command length -- align to 4 byte boundary */
1278  if (cmd->cmd_len > 16) {
1279  additional_fcpcdb_len = cmd->cmd_len - 16;
1280  if ((cmd->cmd_len % 4) != 0) {
1281  /* SCSI cmd > 16 bytes must be multiple of 4 */
1282  goto crc_queuing_error;
1283  }
1284  fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1285  } else {
1286  additional_fcpcdb_len = 0;
1287  fcp_cmnd_len = 12 + 16 + 4;
1288  }
1289 
1290  fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1291 
1292  fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1293  if (cmd->sc_data_direction == DMA_TO_DEVICE)
1294  fcp_cmnd->additional_cdb_len |= 1;
1295  else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1296  fcp_cmnd->additional_cdb_len |= 2;
1297 
1298  int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1299  memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1300  cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1301  cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1302  LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1303  cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1304  MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1305  fcp_cmnd->task_management = 0;
1306 
1307  /*
1308  * Update tagged queuing modifier if using command tag queuing
1309  */
1310  if (scsi_populate_tag_msg(cmd, tag)) {
1311  switch (tag[0]) {
1312  case HEAD_OF_QUEUE_TAG:
1313  fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1314  break;
1315  case ORDERED_QUEUE_TAG:
1316  fcp_cmnd->task_attribute = TSK_ORDERED;
1317  break;
1318  default:
1319  fcp_cmnd->task_attribute = 0;
1320  break;
1321  }
1322  } else {
1323  fcp_cmnd->task_attribute = 0;
1324  }
1325 
1326  cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1327 
1328  /* Compute dif len and adjust data len to include protection */
1329  dif_bytes = 0;
1330  blk_size = cmd->device->sector_size;
1331  dif_bytes = (data_bytes / blk_size) * 8;
1332 
1333  switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1334  case SCSI_PROT_READ_INSERT:
1335  case SCSI_PROT_WRITE_STRIP:
1336  total_bytes = data_bytes;
1337  data_bytes += dif_bytes;
1338  break;
1339 
1340  case SCSI_PROT_READ_STRIP:
1341  case SCSI_PROT_WRITE_INSERT:
1342  case SCSI_PROT_READ_PASS:
1343  case SCSI_PROT_WRITE_PASS:
1344  total_bytes = data_bytes + dif_bytes;
1345  break;
1346  default:
1347  BUG();
1348  }
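/*
 * Illustrative note (added for this listing): with 512-byte sectors and
 * data_bytes = 4096, dif_bytes = (4096 / 512) * 8 = 64.  For a *_PASS
 * operation the fabric therefore carries total_bytes = 4160, while for
 * WRITE_STRIP the wire count stays at 4096 and only the local DMA length
 * grows to 4160 to cover the protection data being stripped.
 */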
1349 
1350  if (!qla2x00_hba_err_chk_enabled(sp))
1351  fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1352  /* HBA error checking enabled */
1353  else if (IS_PI_UNINIT_CAPABLE(ha)) {
1354  if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1355  || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1356      SCSI_PROT_DIF_TYPE2))
1357  fw_prot_opts |= BIT_10;
1358  else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1359      SCSI_PROT_DIF_TYPE3)
1360  fw_prot_opts |= BIT_11;
1361  }
1362 
1363  if (!bundling) {
1364  cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1365  } else {
1366  /*
1367  * Configure Bundling if we need to fetch interleaving
1368  * protection PCI accesses
1369  */
1370  fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1371  crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1372  crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1373  tot_prot_dsds);
1374  cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1375  }
1376 
1377  /* Finish the common fields of CRC pkt */
1378  crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1379  crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1380  crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1381  crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1382  /* Fibre channel byte count */
1383  cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1384  fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1385  additional_fcpcdb_len);
1386  *fcp_dl = htonl(total_bytes);
1387 
1388  if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1389  cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1390  return QLA_SUCCESS;
1391  }
1392  /* Walks data segments */
1393 
1394  cmd_pkt->control_flags |=
1395      __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1396 
1397  if (!bundling && tot_prot_dsds) {
1398  if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1399  cur_dsd, tot_dsds))
1400  goto crc_queuing_error;
1401  } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1402  (tot_dsds - tot_prot_dsds)))
1403  goto crc_queuing_error;
1404 
1405  if (bundling && tot_prot_dsds) {
1406  /* Walks dif segments */
1407  cur_seg = scsi_prot_sglist(cmd);
1408  cmd_pkt->control_flags |=
1409      __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1410  cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1411  if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1412  tot_prot_dsds))
1413  goto crc_queuing_error;
1414  }
1415  return QLA_SUCCESS;
1416 
1417 crc_queuing_error:
1418  /* Cleanup will be performed by the caller */
1419 
1420  return QLA_FUNCTION_FAILED;
1421 }
1422 
1429 int
1430 qla24xx_start_scsi(srb_t *sp)
1431 {
1432  int ret, nseg;
1433  unsigned long flags;
1434  uint32_t *clr_ptr;
1435  uint32_t index;
1436  uint32_t handle;
1437  struct cmd_type_7 *cmd_pkt;
1438  uint16_t cnt;
1439  uint16_t req_cnt;
1440  uint16_t tot_dsds;
1441  struct req_que *req = NULL;
1442  struct rsp_que *rsp = NULL;
1443  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1444  struct scsi_qla_host *vha = sp->fcport->vha;
1445  struct qla_hw_data *ha = vha->hw;
1446  char tag[2];
1447 
1448  /* Setup device pointers. */
1449  ret = 0;
1450 
1451  qla25xx_set_que(sp, &rsp);
1452  req = vha->req;
1453 
1454  /* So we know we haven't pci_map'ed anything yet */
1455  tot_dsds = 0;
1456 
1457  /* Send marker if required */
1458  if (vha->marker_needed != 0) {
1459  if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1460  QLA_SUCCESS)
1461  return QLA_FUNCTION_FAILED;
1462  vha->marker_needed = 0;
1463  }
1464 
1465  /* Acquire ring specific lock */
1466  spin_lock_irqsave(&ha->hardware_lock, flags);
1467 
1468  /* Check for room in outstanding command list. */
1469  handle = req->current_outstanding_cmd;
1470  for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1471  handle++;
1472  if (handle == MAX_OUTSTANDING_COMMANDS)
1473  handle = 1;
1474  if (!req->outstanding_cmds[handle])
1475  break;
1476  }
1477  if (index == MAX_OUTSTANDING_COMMANDS) {
1478  goto queuing_error;
1479  }
1480 
1481  /* Map the sg table so we have an accurate count of sg entries needed */
1482  if (scsi_sg_count(cmd)) {
1483  nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1484  scsi_sg_count(cmd), cmd->sc_data_direction);
1485  if (unlikely(!nseg))
1486  goto queuing_error;
1487  } else
1488  nseg = 0;
1489 
1490  tot_dsds = nseg;
1491  req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1492  if (req->cnt < (req_cnt + 2)) {
1493  cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1494 
1495  if (req->ring_index < cnt)
1496  req->cnt = cnt - req->ring_index;
1497  else
1498  req->cnt = req->length -
1499  (req->ring_index - cnt);
1500  if (req->cnt < (req_cnt + 2))
1501  goto queuing_error;
1502  }
1503 
1504  /* Build command packet. */
1505  req->current_outstanding_cmd = handle;
1506  req->outstanding_cmds[handle] = sp;
1507  sp->handle = handle;
1508  cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1509  req->cnt -= req_cnt;
1510 
1511  cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1512  cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1513 
1514  /* Zero out remaining portion of packet. */
1515  /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1516  clr_ptr = (uint32_t *)cmd_pkt + 2;
1517  memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1518  cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1519 
1520  /* Set NPORT-ID and LUN number*/
1521  cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1522  cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1523  cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1524  cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1525  cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1526 
1527  int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1528  host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1529 
1530  /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1531  if (scsi_populate_tag_msg(cmd, tag)) {
1532  switch (tag[0]) {
1533  case HEAD_OF_QUEUE_TAG:
1534  cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1535  break;
1536  case ORDERED_QUEUE_TAG:
1537  cmd_pkt->task = TSK_ORDERED;
1538  break;
1539  }
1540  }
1541 
1542  /* Load SCSI command packet. */
1543  memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1544  host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1545 
1546  cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1547 
1548  /* Build IOCB segments */
1549  qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1550 
1551  /* Set total data segment count. */
1552  cmd_pkt->entry_count = (uint8_t)req_cnt;
1553  /* Specify response queue number where completion should happen */
1554  cmd_pkt->entry_status = (uint8_t) rsp->id;
1555  wmb();
1556  /* Adjust ring index. */
1557  req->ring_index++;
1558  if (req->ring_index == req->length) {
1559  req->ring_index = 0;
1560  req->ring_ptr = req->ring;
1561  } else
1562  req->ring_ptr++;
1563 
1564  sp->flags |= SRB_DMA_VALID;
1565 
1566  /* Set chip new ring index. */
1567  WRT_REG_DWORD(req->req_q_in, req->ring_index);
1568  RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1569 
1570  /* Manage unprocessed RIO/ZIO commands in response queue. */
1571  if (vha->flags.process_response_queue &&
1572      rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1573  qla24xx_process_response_queue(vha, rsp);
1574 
1575  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1576  return QLA_SUCCESS;
1577 
1578 queuing_error:
1579  if (tot_dsds)
1580  scsi_dma_unmap(cmd);
1581 
1582  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1583 
1584  return QLA_FUNCTION_FAILED;
1585 }
1586 
1587 
1594 int
1595 qla24xx_dif_start_scsi(srb_t *sp)
1596 {
1597  int nseg;
1598  unsigned long flags;
1599  uint32_t *clr_ptr;
1600  uint32_t index;
1601  uint32_t handle;
1602  uint16_t cnt;
1603  uint16_t req_cnt = 0;
1604  uint16_t tot_dsds;
1605  uint16_t tot_prot_dsds;
1606  uint16_t fw_prot_opts = 0;
1607  struct req_que *req = NULL;
1608  struct rsp_que *rsp = NULL;
1609  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1610  struct scsi_qla_host *vha = sp->fcport->vha;
1611  struct qla_hw_data *ha = vha->hw;
1612  struct cmd_type_crc_2 *cmd_pkt;
1613  uint32_t status = 0;
1614 
1615 #define QDSS_GOT_Q_SPACE BIT_0
1616 
1617  /* Only process protection or >16 cdb in this routine */
1618  if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1619  if (cmd->cmd_len <= 16)
1620  return qla24xx_start_scsi(sp);
1621  }
1622 
1623  /* Setup device pointers. */
1624 
1625  qla25xx_set_que(sp, &rsp);
1626  req = vha->req;
1627 
1628  /* So we know we haven't pci_map'ed anything yet */
1629  tot_dsds = 0;
1630 
1631  /* Send marker if required */
1632  if (vha->marker_needed != 0) {
1633  if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1634  QLA_SUCCESS)
1635  return QLA_FUNCTION_FAILED;
1636  vha->marker_needed = 0;
1637  }
1638 
1639  /* Acquire ring specific lock */
1640  spin_lock_irqsave(&ha->hardware_lock, flags);
1641 
1642  /* Check for room in outstanding command list. */
1643  handle = req->current_outstanding_cmd;
1644  for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1645  handle++;
1646  if (handle == MAX_OUTSTANDING_COMMANDS)
1647  handle = 1;
1648  if (!req->outstanding_cmds[handle])
1649  break;
1650  }
1651 
1652  if (index == MAX_OUTSTANDING_COMMANDS)
1653  goto queuing_error;
1654 
1655  /* Compute number of required data segments */
1656  /* Map the sg table so we have an accurate count of sg entries needed */
1657  if (scsi_sg_count(cmd)) {
1658  nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1659  scsi_sg_count(cmd), cmd->sc_data_direction);
1660  if (unlikely(!nseg))
1661  goto queuing_error;
1662  else
1663  sp->flags |= SRB_DMA_VALID;
1664 
1665  if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1666  (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1667  struct qla2_sgx sgx;
1668  uint32_t partial;
1669 
1670  memset(&sgx, 0, sizeof(struct qla2_sgx));
1671  sgx.tot_bytes = scsi_bufflen(cmd);
1672  sgx.cur_sg = scsi_sglist(cmd);
1673  sgx.sp = sp;
1674 
1675  nseg = 0;
1676  while (qla24xx_get_one_block_sg(
1677  cmd->device->sector_size, &sgx, &partial))
1678  nseg++;
1679  }
1680  } else
1681  nseg = 0;
1682 
1683  /* number of required data segments */
1684  tot_dsds = nseg;
1685 
1686  /* Compute number of required protection segments */
1687  if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1688  nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1689  scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1690  if (unlikely(!nseg))
1691  goto queuing_error;
1692  else
1693  sp->flags |= SRB_CRC_PROT_DMA_VALID;
1694 
1695  if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1696  (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1697  nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1698  }
1699  } else {
1700  nseg = 0;
1701  }
1702 
1703  req_cnt = 1;
1704  /* Total Data and protection sg segment(s) */
1705  tot_prot_dsds = nseg;
1706  tot_dsds += nseg;
1707  if (req->cnt < (req_cnt + 2)) {
1708  cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1709 
1710  if (req->ring_index < cnt)
1711  req->cnt = cnt - req->ring_index;
1712  else
1713  req->cnt = req->length -
1714  (req->ring_index - cnt);
1715  if (req->cnt < (req_cnt + 2))
1716  goto queuing_error;
1717  }
1718 
1719  status |= QDSS_GOT_Q_SPACE;
1720 
1721  /* Build header part of command packet (excluding the OPCODE). */
1722  req->current_outstanding_cmd = handle;
1723  req->outstanding_cmds[handle] = sp;
1724  sp->handle = handle;
1725  cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1726  req->cnt -= req_cnt;
1727 
1728  /* Fill-in common area */
1729  cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1730  cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1731 
1732  clr_ptr = (uint32_t *)cmd_pkt + 2;
1733  memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1734 
1735  /* Set NPORT-ID and LUN number*/
1736  cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1737  cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1738  cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1739  cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1740 
1741  int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1742  host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1743 
1744  /* Total Data and protection segment(s) */
1745  cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1746 
1747  /* Build IOCB segments and adjust for data protection segments */
1748  if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1749  req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1750  QLA_SUCCESS)
1751  goto queuing_error;
1752 
1753  cmd_pkt->entry_count = (uint8_t)req_cnt;
1754  /* Specify response queue number where completion should happen */
1755  cmd_pkt->entry_status = (uint8_t) rsp->id;
1756  cmd_pkt->timeout = __constant_cpu_to_le16(0);
1757  wmb();
1758 
1759  /* Adjust ring index. */
1760  req->ring_index++;
1761  if (req->ring_index == req->length) {
1762  req->ring_index = 0;
1763  req->ring_ptr = req->ring;
1764  } else
1765  req->ring_ptr++;
1766 
1767  /* Set chip new ring index. */
1768  WRT_REG_DWORD(req->req_q_in, req->ring_index);
1769  RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1770 
1771  /* Manage unprocessed RIO/ZIO commands in response queue. */
1772  if (vha->flags.process_response_queue &&
1773      rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1774  qla24xx_process_response_queue(vha, rsp);
1775 
1776  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1777 
1778  return QLA_SUCCESS;
1779 
1780 queuing_error:
1781  if (status & QDSS_GOT_Q_SPACE) {
1782  req->outstanding_cmds[handle] = NULL;
1783  req->cnt += req_cnt;
1784  }
1785  /* Cleanup will be performed by the caller (queuecommand) */
1786 
1787  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1788  return QLA_FUNCTION_FAILED;
1789 }
1790 
1791 
1792 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1793 {
1794  struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1795  struct qla_hw_data *ha = sp->fcport->vha->hw;
1796  int affinity = cmd->request->cpu;
1797 
1798  if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1799  affinity < ha->max_rsp_queues - 1)
1800  *rsp = ha->rsp_q_map[affinity + 1];
1801  else
1802  *rsp = ha->rsp_q_map[0];
1803 }
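/*
 * Illustrative note (added for this listing): with CPU affinity enabled on an
 * adapter exposing 4 response queues, a command issued from CPU 2 has its
 * completion steered to rsp_q_map[3]; anything outside the mapped range falls
 * back to the default response queue rsp_q_map[0].
 */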
1804 
1805 /* Generic Control-SRB manipulation functions. */
1806 void *
1807 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1808 {
1809  struct qla_hw_data *ha = vha->hw;
1810  struct req_que *req = ha->req_q_map[0];
1811  device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1812  uint32_t index, handle;
1813  request_t *pkt;
1814  uint16_t cnt, req_cnt;
1815 
1816  pkt = NULL;
1817  req_cnt = 1;
1818  handle = 0;
1819 
1820  if (!sp)
1821  goto skip_cmd_array;
1822 
1823  /* Check for room in outstanding command list. */
1824  handle = req->current_outstanding_cmd;
1825  for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1826  handle++;
1827  if (handle == MAX_OUTSTANDING_COMMANDS)
1828  handle = 1;
1829  if (!req->outstanding_cmds[handle])
1830  break;
1831  }
1832  if (index == MAX_OUTSTANDING_COMMANDS) {
1833  ql_log(ql_log_warn, vha, 0x700b,
1834  "No room on outstanding cmd array.\n");
1835  goto queuing_error;
1836  }
1837 
1838  /* Prep command array. */
1839  req->current_outstanding_cmd = handle;
1840  req->outstanding_cmds[handle] = sp;
1841  sp->handle = handle;
1842 
1843  /* Adjust entry-counts as needed. */
1844  if (sp->type != SRB_SCSI_CMD)
1845  req_cnt = sp->iocbs;
1846 
1847 skip_cmd_array:
1848  /* Check for room on request queue. */
1849  if (req->cnt < req_cnt) {
1850  if (ha->mqenable || IS_QLA83XX(ha))
1851  cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1852  else if (IS_QLA82XX(ha))
1853  cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1854  else if (IS_FWI2_CAPABLE(ha))
1855  cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1856  else
1857  cnt = qla2x00_debounce_register(
1858  ISP_REQ_Q_OUT(ha, &reg->isp));
1859 
1860  if (req->ring_index < cnt)
1861  req->cnt = cnt - req->ring_index;
1862  else
1863  req->cnt = req->length -
1864  (req->ring_index - cnt);
1865  }
1866  if (req->cnt < req_cnt)
1867  goto queuing_error;
1868 
1869  /* Prep packet */
1870  req->cnt -= req_cnt;
1871  pkt = req->ring_ptr;
1872  memset(pkt, 0, REQUEST_ENTRY_SIZE);
1873  pkt->entry_count = req_cnt;
1874  pkt->handle = handle;
1875 
1876 queuing_error:
1877  return pkt;
1878 }
1879 
1880 static void
1881 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1882 {
1883  struct srb_iocb *lio = &sp->u.iocb_cmd;
1884 
1885  logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1886  logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1887  if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1888  logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1889  if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1890  logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1891  logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1892  logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1893  logio->port_id[1] = sp->fcport->d_id.b.area;
1894  logio->port_id[2] = sp->fcport->d_id.b.domain;
1895  logio->vp_index = sp->fcport->vha->vp_idx;
1896 }
1897 
1898 static void
1899 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1900 {
1901  struct qla_hw_data *ha = sp->fcport->vha->hw;
1902  struct srb_iocb *lio = &sp->u.iocb_cmd;
1903  uint16_t opts;
1904 
1905  mbx->entry_type = MBX_IOCB_TYPE;
1906  SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1907  mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1908  opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1909  opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1910  if (HAS_EXTENDED_IDS(ha)) {
1911  mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1912  mbx->mb10 = cpu_to_le16(opts);
1913  } else {
1914  mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1915  }
1916  mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1917  mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1918  sp->fcport->d_id.b.al_pa);
1919  mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1920 }
1921 
1922 static void
1923 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1924 {
1925  logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1926  logio->control_flags =
1927      cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1928  logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1929  logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1930  logio->port_id[1] = sp->fcport->d_id.b.area;
1931  logio->port_id[2] = sp->fcport->d_id.b.domain;
1932  logio->vp_index = sp->fcport->vha->vp_idx;
1933 }
1934 
1935 static void
1936 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1937 {
1938  struct qla_hw_data *ha = sp->fcport->vha->hw;
1939 
1940  mbx->entry_type = MBX_IOCB_TYPE;
1941  SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1942  mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1943  mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1944  cpu_to_le16(sp->fcport->loop_id):
1945  cpu_to_le16(sp->fcport->loop_id << 8);
1946  mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1947  mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1948  sp->fcport->d_id.b.al_pa);
1949  mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1950  /* Implicit: mbx->mbx10 = 0. */
1951 }
1952 
1953 static void
1954 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1955 {
1956  logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1957  logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1958  logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1959  logio->vp_index = sp->fcport->vha->vp_idx;
1960 }
1961 
1962 static void
1963 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1964 {
1965  struct qla_hw_data *ha = sp->fcport->vha->hw;
1966 
1967  mbx->entry_type = MBX_IOCB_TYPE;
1968  SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1969  mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1970  if (HAS_EXTENDED_IDS(ha)) {
1971  mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1972  mbx->mb10 = cpu_to_le16(BIT_0);
1973  } else {
1974  mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1975  }
1976  mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1977  mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1978  mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1979  mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1980  mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1981 }
1982 
1983 static void
1984 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1985 {
1986  uint32_t flags;
1987  unsigned int lun;
1988  struct fc_port *fcport = sp->fcport;
1989  scsi_qla_host_t *vha = fcport->vha;
1990  struct qla_hw_data *ha = vha->hw;
1991  struct srb_iocb *iocb = &sp->u.iocb_cmd;
1992  struct req_que *req = vha->req;
1993 
1994  flags = iocb->u.tmf.flags;
1995  lun = iocb->u.tmf.lun;
1996 
1997  tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1998  tsk->entry_count = 1;
1999  tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2000  tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2001  tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2002  tsk->control_flags = cpu_to_le32(flags);
2003  tsk->port_id[0] = fcport->d_id.b.al_pa;
2004  tsk->port_id[1] = fcport->d_id.b.area;
2005  tsk->port_id[2] = fcport->d_id.b.domain;
2006  tsk->vp_index = fcport->vha->vp_idx;
2007 
2008  if (flags == TCF_LUN_RESET) {
2009  int_to_scsilun(lun, &tsk->lun);
2010  host_to_fcp_swap((uint8_t *)&tsk->lun,
2011  sizeof(tsk->lun));
2012  }
2013 }
2014 
2015 static void
2016 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2017 {
2018  struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2019 
2020  els_iocb->entry_type = ELS_IOCB_TYPE;
2021  els_iocb->entry_count = 1;
2022  els_iocb->sys_define = 0;
2023  els_iocb->entry_status = 0;
2024  els_iocb->handle = sp->handle;
2025  els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2026  els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2027  els_iocb->vp_index = sp->fcport->vha->vp_idx;
2028  els_iocb->sof_type = EST_SOFI3;
2029  els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2030 
2031  els_iocb->opcode =
2032  sp->type == SRB_ELS_CMD_RPT ?
2033  bsg_job->request->rqst_data.r_els.els_code :
2034  bsg_job->request->rqst_data.h_els.command_code;
2035  els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2036  els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2037  els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2038  els_iocb->control_flags = 0;
2039  els_iocb->rx_byte_count =
2040  cpu_to_le32(bsg_job->reply_payload.payload_len);
2041  els_iocb->tx_byte_count =
2042  cpu_to_le32(bsg_job->request_payload.payload_len);
2043 
2044  els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2045  (bsg_job->request_payload.sg_list)));
2046  els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2047  (bsg_job->request_payload.sg_list)));
2048  els_iocb->tx_len = cpu_to_le32(sg_dma_len
2049  (bsg_job->request_payload.sg_list));
2050 
2051  els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2052  (bsg_job->reply_payload.sg_list)));
2053  els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2054  (bsg_job->reply_payload.sg_list)));
2055  els_iocb->rx_len = cpu_to_le32(sg_dma_len
2056  (bsg_job->reply_payload.sg_list));
2057 }
2058 
2059 static void
2060 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2061 {
2062  uint16_t avail_dsds;
2063  uint32_t *cur_dsd;
2064  struct scatterlist *sg;
2065  int index;
2066  uint16_t tot_dsds;
2067  scsi_qla_host_t *vha = sp->fcport->vha;
2068  struct qla_hw_data *ha = vha->hw;
2069  struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2070  int loop_iterartion = 0;
2071  int cont_iocb_prsnt = 0;
2072  int entry_count = 1;
2073 
2074  memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2075  ct_iocb->entry_type = CT_IOCB_TYPE;
2076  ct_iocb->entry_status = 0;
2077  ct_iocb->handle1 = sp->handle;
2078  SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2079  ct_iocb->status = __constant_cpu_to_le16(0);
2080  ct_iocb->control_flags = __constant_cpu_to_le16(0);
2081  ct_iocb->timeout = 0;
2082  ct_iocb->cmd_dsd_count =
2083  __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2084  ct_iocb->total_dsd_count =
2085  __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2086  ct_iocb->req_bytecount =
2087  cpu_to_le32(bsg_job->request_payload.payload_len);
2088  ct_iocb->rsp_bytecount =
2089  cpu_to_le32(bsg_job->reply_payload.payload_len);
2090 
2091  ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2092      (bsg_job->request_payload.sg_list)));
2093  ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2094      (bsg_job->request_payload.sg_list)));
2095  ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2096 
2097  ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2098      (bsg_job->reply_payload.sg_list)));
2099  ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2100      (bsg_job->reply_payload.sg_list)));
2101  ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2102 
2103  avail_dsds = 1;
2104  cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2105  index = 0;
2106  tot_dsds = bsg_job->reply_payload.sg_cnt;
2107 
2108  for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2109  dma_addr_t sle_dma;
2110  cont_a64_entry_t *cont_pkt;
2111 
2112  /* Allocate additional continuation packets? */
2113  if (avail_dsds == 0) {
2114  /*
2115  * Five DSDs are available in the Cont.
2116  * Type 1 IOCB.
2117  */
2118  cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2119  vha->hw->req_q_map[0]);
2120  cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2121  avail_dsds = 5;
2122  cont_iocb_prsnt = 1;
2123  entry_count++;
2124  }
2125 
2126  sle_dma = sg_dma_address(sg);
2127  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2128  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2129  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2130  loop_iteration++;
2131  avail_dsds--;
2132  }
2133  ct_iocb->entry_count = entry_count;
2134 }
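/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * entry count produced by the reply-payload loop above -- one base CT IOCB
 * holding a single DSD, plus one Continuation Type 1 IOCB for every further
 * group of up to five DSDs.
 */
static inline uint16_t qla_sketch_ct_entry_count(uint16_t dsds)
{
	uint16_t entries = 1;				/* the CT IOCB itself */

	if (dsds > 1)
		entries += DIV_ROUND_UP(dsds - 1, 5);	/* continuation IOCBs */
	return entries;
}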
2135 
2136 static void
2137 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2138 {
2139  uint16_t avail_dsds;
2140  uint32_t *cur_dsd;
2141  struct scatterlist *sg;
2142  int index;
2143  uint16_t tot_dsds;
2144  scsi_qla_host_t *vha = sp->fcport->vha;
2145  struct qla_hw_data *ha = vha->hw;
2146  struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2147  int loop_iteration = 0;
2148  int cont_iocb_prsnt = 0;
2149  int entry_count = 1;
2150 
2151  ct_iocb->entry_type = CT_IOCB_TYPE;
2152  ct_iocb->entry_status = 0;
2153  ct_iocb->sys_define = 0;
2154  ct_iocb->handle = sp->handle;
2155 
2156  ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2157  ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2158  ct_iocb->comp_status = __constant_cpu_to_le16(0);
2159 
2160  ct_iocb->cmd_dsd_count =
2161  __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2162  ct_iocb->timeout = 0;
2163  ct_iocb->rsp_dsd_count =
2164  __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2165  ct_iocb->rsp_byte_count =
2166  cpu_to_le32(bsg_job->reply_payload.payload_len);
2167  ct_iocb->cmd_byte_count =
2168  cpu_to_le32(bsg_job->request_payload.payload_len);
2169  ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2170  (bsg_job->request_payload.sg_list)));
2171  ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2172  (bsg_job->request_payload.sg_list)));
2173  ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2174  (bsg_job->request_payload.sg_list));
2175 
2176  avail_dsds = 1;
2177  cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2178  index = 0;
2179  tot_dsds = bsg_job->reply_payload.sg_cnt;
2180 
2181  for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2182  dma_addr_t sle_dma;
2183  cont_a64_entry_t *cont_pkt;
2184 
2185  /* Allocate additional continuation packets? */
2186  if (avail_dsds == 0) {
2187  /*
2188  * Five DSDs are available in the Cont.
2189  * Type 1 IOCB.
2190  */
2191  cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2192  ha->req_q_map[0]);
2193  cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2194  avail_dsds = 5;
2195  cont_iocb_prsnt = 1;
2196  entry_count++;
2197  }
2198 
2199  sle_dma = sg_dma_address(sg);
2200  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2201  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2202  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2203  loop_iteration++;
2204  avail_dsds--;
2205  }
2206  ct_iocb->entry_count = entry_count;
2207 }
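/*
 * The 24xx CT builder above mirrors qla2x00_ct_iocb(): the differences are
 * the fabric-style addressing (nport_handle and vp_index instead of
 * SET_TARGET_ID) and the ct_entry_24xx field layout, while the one-inline-DSD
 * plus five-DSDs-per-continuation accounting is identical.
 */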
2208 
2209 /*
2210  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2211  * @sp: command to send to the ISP
2212  *
2213  * Returns non-zero if a failure occurred, else zero.
2214  */
2215 int
2216 qla82xx_start_scsi(srb_t *sp)
2217 {
2218  int ret, nseg;
2219  unsigned long flags;
2220  struct scsi_cmnd *cmd;
2221  uint32_t *clr_ptr;
2222  uint32_t index;
2223  uint32_t handle;
2224  uint16_t cnt;
2225  uint16_t req_cnt;
2226  uint16_t tot_dsds;
2227  struct device_reg_82xx __iomem *reg;
2228  uint32_t dbval;
2229  uint32_t *fcp_dl;
2230  uint16_t additional_cdb_len;
2231  struct ct6_dsd *ctx;
2232  struct scsi_qla_host *vha = sp->fcport->vha;
2233  struct qla_hw_data *ha = vha->hw;
2234  struct req_que *req = NULL;
2235  struct rsp_que *rsp = NULL;
2236  char tag[2];
2237 
2238  /* Setup device pointers. */
2239  ret = 0;
2240  reg = &ha->iobase->isp82;
2241  cmd = GET_CMD_SP(sp);
2242  req = vha->req;
2243  rsp = ha->rsp_q_map[0];
2244 
2245  /* So we know we haven't pci_map'ed anything yet */
2246  tot_dsds = 0;
2247 
2248  dbval = 0x04 | (ha->portnum << 5);
2249 
2250  /* Send marker if required */
2251  if (vha->marker_needed != 0) {
2252  if (qla2x00_marker(vha, req,
2253  rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2254  ql_log(ql_log_warn, vha, 0x300c,
2255  "qla2x00_marker failed for cmd=%p.\n", cmd);
2256  return QLA_FUNCTION_FAILED;
2257  }
2258  vha->marker_needed = 0;
2259  }
2260 
2261  /* Acquire ring specific lock */
2262  spin_lock_irqsave(&ha->hardware_lock, flags);
2263 
2264  /* Check for room in outstanding command list. */
2265  handle = req->current_outstanding_cmd;
2266  for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2267  handle++;
2268  if (handle == MAX_OUTSTANDING_COMMANDS)
2269  handle = 1;
2270  if (!req->outstanding_cmds[handle])
2271  break;
2272  }
2273  if (index == MAX_OUTSTANDING_COMMANDS)
2274  goto queuing_error;
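/*
 * The loop above scans the outstanding-command table circularly, starting
 * just past the last handle issued and wrapping back to slot 1 (slot 0 is
 * never used); if all MAX_OUTSTANDING_COMMANDS - 1 probes find busy slots,
 * the request takes the queuing_error path.
 */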
2275 
2276  /* Map the sg table so we have an accurate count of sg entries needed */
2277  if (scsi_sg_count(cmd)) {
2278  nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2279  scsi_sg_count(cmd), cmd->sc_data_direction);
2280  if (unlikely(!nseg))
2281  goto queuing_error;
2282  } else
2283  nseg = 0;
2284 
2285  tot_dsds = nseg;
2286 
2287  if (tot_dsds > ql2xshiftctondsd) {
2288  struct cmd_type_6 *cmd_pkt;
2289  uint16_t more_dsd_lists = 0;
2290  struct dsd_dma *dsd_ptr;
2291  uint16_t i;
2292 
2293  more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2294  if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2295  ql_dbg(ql_dbg_io, vha, 0x300d,
2296  "Num of DSD list %d is more than %d for cmd=%p.\n",
2297  more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2298  cmd);
2299  goto queuing_error;
2300  }
2301 
2302  if (more_dsd_lists <= ha->gbl_dsd_avail)
2303  goto sufficient_dsds;
2304  else
2305  more_dsd_lists -= ha->gbl_dsd_avail;
2306 
2307  for (i = 0; i < more_dsd_lists; i++) {
2308  dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2309  if (!dsd_ptr) {
2310  ql_log(ql_log_fatal, vha, 0x300e,
2311  "Failed to allocate memory for dsd_dma "
2312  "for cmd=%p.\n", cmd);
2313  goto queuing_error;
2314  }
2315 
2316  dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2317  GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2318  if (!dsd_ptr->dsd_addr) {
2319  kfree(dsd_ptr);
2320  ql_log(ql_log_fatal, vha, 0x300f,
2321  "Failed to allocate memory for dsd_addr "
2322  "for cmd=%p.\n", cmd);
2323  goto queuing_error;
2324  }
2325  list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2326  ha->gbl_dsd_avail++;
2327  }
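/*
 * At this point the global list ha->gbl_dsd_list holds enough pre-allocated
 * DSD-list buffers (each backed by dl_dma_pool memory) to chain every
 * scatter/gather entry of this command.
 */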
2328 
2329 sufficient_dsds:
2330  req_cnt = 1;
2331 
2332  if (req->cnt < (req_cnt + 2)) {
2333  cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2334  &reg->req_q_out[0]);
2335  if (req->ring_index < cnt)
2336  req->cnt = cnt - req->ring_index;
2337  else
2338  req->cnt = req->length -
2339  (req->ring_index - cnt);
2340  if (req->cnt < (req_cnt + 2))
2341  goto queuing_error;
2342  }
2343 
2344  ctx = sp->u.scmd.ctx =
2345  mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2346  if (!ctx) {
2347  ql_log(ql_log_fatal, vha, 0x3010,
2348  "Failed to allocate ctx for cmd=%p.\n", cmd);
2349  goto queuing_error;
2350  }
2351 
2352  memset(ctx, 0, sizeof(struct ct6_dsd));
2353  ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2354  GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2355  if (!ctx->fcp_cmnd) {
2356  ql_log(ql_log_fatal, vha, 0x3011,
2357  "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2358  goto queuing_error;
2359  }
2360 
2361  /* Initialize the DSD list and dma handle */
2362  INIT_LIST_HEAD(&ctx->dsd_list);
2363  ctx->dsd_use_cnt = 0;
2364 
2365  if (cmd->cmd_len > 16) {
2366  additional_cdb_len = cmd->cmd_len - 16;
2367  if ((cmd->cmd_len % 4) != 0) {
2368  /* SCSI commands longer than 16 bytes must be a
2369  * multiple of 4 bytes
2370  */
2371  ql_log(ql_log_warn, vha, 0x3012,
2372  "scsi cmd len %d not multiple of 4 "
2373  "for cmd=%p.\n", cmd->cmd_len, cmd);
2374  goto queuing_error_fcp_cmnd;
2375  }
2376  ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2377  } else {
2378  additional_cdb_len = 0;
2379  ctx->fcp_cmnd_len = 12 + 16 + 4;
2380  }
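/*
 * fcp_cmnd_len therefore covers the 12-byte fixed FCP_CMND header, the CDB
 * (16 bytes, or the full padded CDB when longer) and the 4-byte FCP_DL
 * field that is filled in just after the CDB further below.
 */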
2381 
2382  cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2383  cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2384 
2385  /* Zero out remaining portion of packet. */
2386  /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2387  clr_ptr = (uint32_t *)cmd_pkt + 2;
2388  memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2389  cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2390 
2391  /* Set NPORT-ID and LUN number*/
2392  cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2393  cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2394  cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2395  cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2396  cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2397 
2398  /* Build IOCB segments */
2399  if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2400  goto queuing_error_fcp_cmnd;
2401 
2402  int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2403  host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2404 
2405  /* build FCP_CMND IU */
2406  memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2407  int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2408  ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2409 
2410  if (cmd->sc_data_direction == DMA_TO_DEVICE)
2411  ctx->fcp_cmnd->additional_cdb_len |= 1;
2412  else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2413  ctx->fcp_cmnd->additional_cdb_len |= 2;
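/*
 * The same FCP_CMND byte that holds the additional CDB length also carries
 * the data direction: bit 0 (WRDATA) for writes, bit 1 (RDDATA) for reads.
 */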
2414 
2415  /*
2416  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2417  */
2418  if (scsi_populate_tag_msg(cmd, tag)) {
2419  switch (tag[0]) {
2420  case HEAD_OF_QUEUE_TAG:
2421  ctx->fcp_cmnd->task_attribute =
2422  TSK_HEAD_OF_QUEUE;
2423  break;
2424  case ORDERED_QUEUE_TAG:
2425  ctx->fcp_cmnd->task_attribute =
2426  TSK_ORDERED;
2427  break;
2428  }
2429  }
2430 
2431  /* Populate the FCP_PRIO. */
2432  if (ha->flags.fcp_prio_enabled)
2433  ctx->fcp_cmnd->task_attribute |=
2434  sp->fcport->fcp_prio << 3;
2435 
2436  memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2437 
2438  fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2439  additional_cdb_len);
2440  *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
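/*
 * FCP_DL, the total expected data length, sits immediately after the
 * (padded) CDB in the FCP_CMND IU and is big-endian on the wire -- hence
 * htonl() here instead of the cpu_to_le32() used for ISP-consumed fields.
 */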
2441 
2442  cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2443  cmd_pkt->fcp_cmnd_dseg_address[0] =
2444  cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2445  cmd_pkt->fcp_cmnd_dseg_address[1] =
2446  cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2447 
2448  sp->flags |= SRB_FCP_CMND_DMA_VALID;
2449  cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2450  /* Set total data segment count. */
2451  cmd_pkt->entry_count = (uint8_t)req_cnt;
2452  /* Specify response queue number where
2453  * completion should happen
2454  */
2455  cmd_pkt->entry_status = (uint8_t) rsp->id;
2456  } else {
2457  struct cmd_type_7 *cmd_pkt;
2458  req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2459  if (req->cnt < (req_cnt + 2)) {
2460  cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2461  &reg->req_q_out[0]);
2462  if (req->ring_index < cnt)
2463  req->cnt = cnt - req->ring_index;
2464  else
2465  req->cnt = req->length -
2466  (req->ring_index - cnt);
2467  }
2468  if (req->cnt < (req_cnt + 2))
2469  goto queuing_error;
2470 
2471  cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2472  cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2473 
2474  /* Zero out remaining portion of packet. */
2475  /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2476  clr_ptr = (uint32_t *)cmd_pkt + 2;
2477  memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2478  cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2479 
2480  /* Set NPORT-ID and LUN number*/
2481  cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2482  cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2483  cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2484  cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2485  cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2486 
2487  int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2488  host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2489  sizeof(cmd_pkt->lun));
2490 
2491  /*
2492  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2493  */
2494  if (scsi_populate_tag_msg(cmd, tag)) {
2495  switch (tag[0]) {
2496  case HEAD_OF_QUEUE_TAG:
2497  cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2498  break;
2499  case ORDERED_QUEUE_TAG:
2500  cmd_pkt->task = TSK_ORDERED;
2501  break;
2502  }
2503  }
2504 
2505  /* Populate the FCP_PRIO. */
2506  if (ha->flags.fcp_prio_enabled)
2507  cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2508 
2509  /* Load SCSI command packet. */
2510  memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2511  host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2512 
2513  cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2514 
2515  /* Build IOCB segments */
2516  qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2517 
2518  /* Set total data segment count. */
2519  cmd_pkt->entry_count = (uint8_t)req_cnt;
2520  /* Specify response queue number where
2521  * completion should happen.
2522  */
2523  cmd_pkt->entry_status = (uint8_t) rsp->id;
2524 
2525  }
2526  /* Build command packet. */
2527  req->current_outstanding_cmd = handle;
2528  req->outstanding_cmds[handle] = sp;
2529  sp->handle = handle;
2530  cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2531  req->cnt -= req_cnt;
2532  wmb();
2533 
2534  /* Adjust ring index. */
2535  req->ring_index++;
2536  if (req->ring_index == req->length) {
2537  req->ring_index = 0;
2538  req->ring_ptr = req->ring;
2539  } else
2540  req->ring_ptr++;
2541 
2542  sp->flags |= SRB_DMA_VALID;
2543 
2544  /* Set chip new ring index. */
2545  /* write, read and verify logic */
2546  dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2547  if (ql2xdbwr)
2548  qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2549  else {
2550  WRT_REG_DWORD(
2551  (unsigned long __iomem *)ha->nxdb_wr_ptr,
2552  dbval);
2553  wmb();
2554  while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2555  WRT_REG_DWORD(
2556  (unsigned long __iomem *)ha->nxdb_wr_ptr,
2557  dbval);
2558  wmb();
2559  }
2560  }
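/*
 * Doorbell write/read/verify: dbval now also carries the request queue id
 * (bits 8-15) and the new ring index (bits 16-31); it is rewritten to the
 * write pointer until the read-back pointer matches, i.e. until the
 * hardware has observably latched the doorbell.
 */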
2561 
2562  /* Manage unprocessed RIO/ZIO commands in response queue. */
2563  if (vha->flags.process_response_queue &&
2564  rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2565  qla24xx_process_response_queue(vha, rsp);
2566 
2567  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2568  return QLA_SUCCESS;
2569 
2570 queuing_error_fcp_cmnd:
2571  dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2572 queuing_error:
2573  if (tot_dsds)
2574  scsi_dma_unmap(cmd);
2575 
2576  if (sp->u.scmd.ctx) {
2577  mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2578  sp->u.scmd.ctx = NULL;
2579  }
2580  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2581 
2582  return QLA_FUNCTION_FAILED;
2583 }
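/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * free-slot computation used in both branches above whenever the cached
 * req->cnt runs low -- the distance from the driver's ring_index to the
 * firmware's out pointer, wrapping around the ring length.
 */
static inline uint16_t qla_sketch_req_space(uint16_t length,
	uint16_t ring_index, uint16_t out)
{
	if (ring_index < out)
		return out - ring_index;	/* out pointer ahead of us */
	return length - (ring_index - out);	/* ring has wrapped */
}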
2584 
2585 int
2586 qla2x00_start_sp(srb_t *sp)
2587 {
2588  int rval;
2589  struct qla_hw_data *ha = sp->fcport->vha->hw;
2590  void *pkt;
2591  unsigned long flags;
2592 
2593  rval = QLA_FUNCTION_FAILED;
2594  spin_lock_irqsave(&ha->hardware_lock, flags);
2595  pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2596  if (!pkt) {
2597  ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2598  "qla2x00_alloc_iocbs failed.\n");
2599  goto done;
2600  }
2601 
2602  rval = QLA_SUCCESS;
2603  switch (sp->type) {
2604  case SRB_LOGIN_CMD:
2605  IS_FWI2_CAPABLE(ha) ?
2606  qla24xx_login_iocb(sp, pkt) :
2607  qla2x00_login_iocb(sp, pkt);
2608  break;
2609  case SRB_LOGOUT_CMD:
2610  IS_FWI2_CAPABLE(ha) ?
2611  qla24xx_logout_iocb(sp, pkt) :
2612  qla2x00_logout_iocb(sp, pkt);
2613  break;
2614  case SRB_ELS_CMD_RPT:
2615  case SRB_ELS_CMD_HST:
2616  qla24xx_els_iocb(sp, pkt);
2617  break;
2618  case SRB_CT_CMD:
2619  IS_FWI2_CAPABLE(ha) ?
2620  qla24xx_ct_iocb(sp, pkt) :
2621  qla2x00_ct_iocb(sp, pkt);
2622  break;
2623  case SRB_ADISC_CMD:
2624  IS_FWI2_CAPABLE(ha) ?
2625  qla24xx_adisc_iocb(sp, pkt) :
2626  qla2x00_adisc_iocb(sp, pkt);
2627  break;
2628  case SRB_TM_CMD:
2629  qla24xx_tm_iocb(sp, pkt);
2630  break;
2631  default:
2632  break;
2633  }
2634 
2635  wmb();
2636  qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2637 done:
2638  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2639  return rval;
2640 }
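/*
 * The dispatcher above is the common entry point for non-SCSI SRBs: it
 * allocates an IOCB slot under hardware_lock, hands the packet to the
 * type-specific builder (login/logout, ELS, CT, ADISC or TM, choosing the
 * FWI2 or legacy variant where both exist), and then rings request queue 0.
 */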
2641 
2642 static void
2643 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2644  struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2645 {
2646  uint16_t avail_dsds;
2647  uint32_t *cur_dsd;
2648  uint32_t req_data_len = 0;
2649  uint32_t rsp_data_len = 0;
2650  struct scatterlist *sg;
2651  int index;
2652  int entry_count = 1;
2653  struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2654 
2655  /* Update entry type to indicate a bidir command */
2656  *((uint32_t *)(&cmd_pkt->entry_type)) =
2657  __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2658 
2659  /* Set the transfer direction; in this case set both flags.
2660  * Also set the BD_WRAP_BACK flag; the firmware takes care of
2661  * assigning DID=SID for outgoing pkts.
2662  */
2663  cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2664  cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2665  cmd_pkt->control_flags =
2666  __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2667  BD_WRAP_BACK);
2668 
2669  req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2670  cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2671  cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2672  cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2673 
2674  vha->bidi_stats.transfer_bytes += req_data_len;
2675  vha->bidi_stats.io_count++;
2676 
2677  /* Only one dsd is available for bidirectional IOCB, remaining dsds
2678  * are bundled in continuation iocb
2679  */
2680  avail_dsds = 1;
2681  cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2682 
2683  index = 0;
2684 
2685  for_each_sg(bsg_job->request_payload.sg_list, sg,
2686  bsg_job->request_payload.sg_cnt, index) {
2687  dma_addr_t sle_dma;
2688  cont_a64_entry_t *cont_pkt;
2689 
2690  /* Allocate additional continuation packets */
2691  if (avail_dsds == 0) {
2692  /* Continuation type 1 IOCB can accommodate
2693  * 5 DSDs
2694  */
2695  cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2696  cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2697  avail_dsds = 5;
2698  entry_count++;
2699  }
2700  sle_dma = sg_dma_address(sg);
2701  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2702  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2703  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2704  avail_dsds--;
2705  }
2706  /* For a read request the DSDs always go to a continuation IOCB,
2707  * following the write DSDs. If there is room on the current IOCB
2708  * they are added to that IOCB, otherwise a new continuation IOCB is
2709  * allocated.
2710  */
2711  for_each_sg(bsg_job->reply_payload.sg_list, sg,
2712  bsg_job->reply_payload.sg_cnt, index) {
2713  dma_addr_t sle_dma;
2714  cont_a64_entry_t *cont_pkt;
2715 
2716  /* Allocate additional continuation packets */
2717  if (avail_dsds == 0) {
2718  /* Continuation type 1 IOCB can accommodate
2719  * 5 DSDs
2720  */
2721  cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2722  cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2723  avail_dsds = 5;
2724  entry_count++;
2725  }
2726  sle_dma = sg_dma_address(sg);
2727  *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2728  *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2729  *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2730  avail_dsds--;
2731  }
2732  /* This value should be same as number of IOCB required for this cmd */
2733  cmd_pkt->entry_count = entry_count;
2734 }
2735 
2736 int
2737 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2738 {
2739 
2740  struct qla_hw_data *ha = vha->hw;
2741  unsigned long flags;
2742  uint32_t handle;
2743  uint32_t index;
2744  uint16_t req_cnt;
2745  uint16_t cnt;
2746  uint32_t *clr_ptr;
2747  struct cmd_bidir *cmd_pkt = NULL;
2748  struct rsp_que *rsp;
2749  struct req_que *req;
2750  int rval = EXT_STATUS_OK;
2751  device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
2752 
2753  rval = QLA_SUCCESS;
2754 
2755  rsp = ha->rsp_q_map[0];
2756  req = vha->req;
2757 
2758  /* Send marker if required */
2759  if (vha->marker_needed != 0) {
2760  if (qla2x00_marker(vha, req,
2761  rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2762  return EXT_STATUS_MAILBOX;
2763  vha->marker_needed = 0;
2764  }
2765 
2766  /* Acquire ring specific lock */
2767  spin_lock_irqsave(&ha->hardware_lock, flags);
2768 
2769  /* Check for room in outstanding command list. */
2770  handle = req->current_outstanding_cmd;
2771  for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2772  handle++;
2773  if (handle == MAX_OUTSTANDING_COMMANDS)
2774  handle = 1;
2775  if (!req->outstanding_cmds[handle])
2776  break;
2777  }
2778 
2779  if (index == MAX_OUTSTANDING_COMMANDS) {
2780  rval = EXT_STATUS_BUSY;
2781  goto queuing_error;
2782  }
2783 
2784  /* Calculate number of IOCB required */
2785  req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2786 
2787  /* Check for room on request queue. */
2788  if (req->cnt < req_cnt + 2) {
2789  if (ha->mqenable)
2790  cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2791  else if (IS_QLA82XX(ha))
2792  cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2793  else if (IS_FWI2_CAPABLE(ha))
2794  cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2795  else
2796  cnt = qla2x00_debounce_register(
2797  ISP_REQ_Q_OUT(ha, &reg->isp));
2798 
2799  if (req->ring_index < cnt)
2800  req->cnt = cnt - req->ring_index;
2801  else
2802  req->cnt = req->length -
2803  (req->ring_index - cnt);
2804  }
2805  if (req->cnt < req_cnt + 2) {
2806  rval = EXT_STATUS_BUSY;
2807  goto queuing_error;
2808  }
2809 
2810  cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2811  cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2812 
2813  /* Zero out remaining portion of packet. */
2814  /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2815  clr_ptr = (uint32_t *)cmd_pkt + 2;
2816  memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2817 
2818  /* Set NPORT-ID (of vha)*/
2819  cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2820  cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2821  cmd_pkt->port_id[1] = vha->d_id.b.area;
2822  cmd_pkt->port_id[2] = vha->d_id.b.domain;
2823 
2824  qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2825  cmd_pkt->entry_status = (uint8_t) rsp->id;
2826  /* Build command packet. */
2827  req->current_outstanding_cmd = handle;
2828  req->outstanding_cmds[handle] = sp;
2829  sp->handle = handle;
2830  req->cnt -= req_cnt;
2831 
2832  /* Send the command to the firmware */
2833  wmb();
2834  qla2x00_start_iocbs(vha, req);
2835 queuing_error:
2836  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2837  return rval;
2838 }
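/*
 * Unlike the SCSI start routines above, the bidirectional start routine
 * reports status with the EXT_STATUS_* codes (EXT_STATUS_OK, EXT_STATUS_BUSY,
 * EXT_STATUS_MAILBOX) expected by its BSG caller rather than the QLA_* codes.
 */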