Linux Kernel 3.7.1
ql4_iocb.c
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>

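/*
 * qla4xxx_space_in_req_ring - checks for adequate space in the request ring
 * @ha: Pointer to host adapter structure.
 * @req_cnt: number of request entries needed.
 *
 * Returns 1 if at least req_cnt + 2 entries are free, otherwise 0.
 */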
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
        uint16_t cnt;

        /* Calculate number of free request entries. */
        if ((req_cnt + 2) >= ha->req_q_count) {
                cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
                if (ha->request_in < cnt)
                        ha->req_q_count = cnt - ha->request_in;
                else
                        ha->req_q_count = REQUEST_QUEUE_DEPTH -
                                          (ha->request_in - cnt);
        }

        /* Check if room for request in request ring. */
        if ((req_cnt + 2) < ha->req_q_count)
                return 1;
        else
                return 0;
}

static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
        /* Advance request queue pointer */
        if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
                ha->request_in = 0;
                ha->request_ptr = ha->request_ring;
        } else {
                ha->request_in++;
                ha->request_ptr++;
        }
}

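/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *      - returns the current request_in pointer (if queue not full)
 *      - advances the request_in pointer
 *      - checks for queue full
 **/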
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
                               struct queue_entry **queue_entry)
{
        uint16_t req_cnt = 1;

        if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
                *queue_entry = ha->request_ptr;
                memset(*queue_entry, 0, sizeof(**queue_entry));

                qla4xxx_advance_req_ring_ptr(ha);
                ha->req_q_count -= req_cnt;
                return QLA_SUCCESS;
        }

        return QLA_ERROR;
}

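/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/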
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
                             struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
{
        struct qla4_marker_entry *marker_entry;
        unsigned long flags = 0;
        int status = QLA_SUCCESS;

        /* Acquire hardware specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Get pointer to the queue entry for the marker */
        if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
            QLA_SUCCESS) {
                status = QLA_ERROR;
                goto exit_send_marker;
        }

        /* Put the marker in the request queue */
        marker_entry->hdr.entryType = ET_MARKER;
        marker_entry->hdr.entryCount = 1;
        marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
        marker_entry->modifier = cpu_to_le16(mrkr_mod);
        int_to_scsilun(lun, &marker_entry->lun);
        wmb();

        /* Tell ISP it's got a new I/O request */
        ha->isp_ops->queue_iocb(ha);

exit_send_marker:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return status;
}

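/*
 * qla4xxx_alloc_cont_entry - grabs the next request ring slot and
 * initializes it as a Continuation Type 1 IOCB.
 */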
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
        struct continuation_t1_entry *cont_entry;

        cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

        qla4xxx_advance_req_ring_ptr(ha);

        /* Load packet defaults */
        cont_entry->hdr.entryType = ET_CONTINUE;
        cont_entry->hdr.entryCount = 1;
        cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

        return cont_entry;
}

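/*
 * qla4xxx_calc_request_entries - returns the number of IOCB entries needed
 * to hold @dsds data segment descriptors: one command entry plus as many
 * continuation entries as required.
 */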
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > COMMAND_SEG) {
                iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
                if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
                        iocbs++;
        }
        return iocbs;
}

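/*
 * qla4xxx_build_scsi_iocbs - walks the scatter/gather list of a SCSI command
 * and fills in the data segment descriptors of the command entry, allocating
 * continuation entries whenever the current entry runs out of descriptors.
 */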
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
                                     struct command_t3_entry *cmd_entry,
                                     uint16_t tot_dsds)
{
        struct scsi_qla_host *ha;
        uint16_t avail_dsds;
        struct data_seg_a64 *cur_dsd;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = srb->cmd;
        ha = srb->ha;

        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                /* No data being transferred */
                cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
                return;
        }

        avail_dsds = COMMAND_SEG;
        cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        struct continuation_t1_entry *cont_entry;

                        cont_entry = qla4xxx_alloc_cont_entry(ha);
                        cur_dsd = (struct data_seg_a64 *)
                                  &cont_entry->dataseg[0];
                        avail_dsds = CONTINUE_SEG;
                }

                sle_dma = sg_dma_address(sg);
                cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
                cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
                cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                cur_dsd++;
        }
}

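/**
 * qla4_83xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/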
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
        writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
        readl(&ha->qla4_83xx_reg->req_q_in);
}

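/**
 * qla4_83xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 **/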
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
        writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
        readl(&ha->qla4_83xx_reg->rsp_q_out);
}

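/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue by writing
 * the doorbell register.
 **/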
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
        uint32_t dbval = 0;

        dbval = 0x14 | (ha->func_num << 5);
        dbval = dbval | (0 << 8) | (ha->request_in << 16);

        /* Ring the doorbell with the new request index */
        qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, dbval);
}

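/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 **/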
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
        writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
        readl(&ha->qla4_82xx_reg->rsp_q_out);
}

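/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/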
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
        writel(ha->request_in, &ha->reg->req_q_in);
        readl(&ha->reg->req_q_in);
}

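/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 **/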
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
        writel(ha->response_out, &ha->reg->rsp_q_out);
        readl(&ha->reg->rsp_q_out);
}

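/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/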
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
        struct scsi_cmnd *cmd = srb->cmd;
        struct ddb_entry *ddb_entry;
        struct command_t3_entry *cmd_entry;
        int nseg;
        uint16_t tot_dsds;
        uint16_t req_cnt;
        unsigned long flags;
        uint32_t index;
        char tag[2];

        /* Get real lun and adapter */
        ddb_entry = srb->ddb;

        tot_dsds = 0;

        /* Acquire hardware specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        index = (uint32_t)cmd->request->tag;

        /*
         * Check to see if adapter is online before placing request on
         * request queue.  If a reset occurs and a request is in the queue,
         * the firmware will still attempt to process the request, retrieving
         * garbage for pointers.
         */
        if (!test_bit(AF_ONLINE, &ha->flags)) {
                DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
                              "Do not issue command.\n",
                              ha->host_no, __func__));
                goto queuing_error;
        }

        /* Calculate the number of request entries needed. */
        nseg = scsi_dma_map(cmd);
        if (nseg < 0)
                goto queuing_error;
        tot_dsds = nseg;

        req_cnt = qla4xxx_calc_request_entries(tot_dsds);
        if (!qla4xxx_space_in_req_ring(ha, req_cnt))
                goto queuing_error;

        /* total iocbs active */
        if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
                goto queuing_error;

        /* Build command packet */
        cmd_entry = (struct command_t3_entry *) ha->request_ptr;
        memset(cmd_entry, 0, sizeof(struct command_t3_entry));
        cmd_entry->hdr.entryType = ET_COMMAND;
        cmd_entry->handle = cpu_to_le32(index);
        cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

        int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
        cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
        memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
        cmd_entry->hdr.entryCount = req_cnt;

        /* Set data transfer direction control flags.
         * NOTE: Look at data_direction bits iff there is data to be
         *       transferred, as the data direction bit is sometimes filled
         *       in when there is no data to be transferred */
        cmd_entry->control_flags = CF_NO_DATA;
        if (scsi_bufflen(cmd)) {
                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        cmd_entry->control_flags = CF_WRITE;
                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                        cmd_entry->control_flags = CF_READ;

                ha->bytes_xfered += scsi_bufflen(cmd);
                if (ha->bytes_xfered & ~0xFFFFF) {
                        ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
                        ha->bytes_xfered &= 0xFFFFF;
                }
        }

        /* Set tagged queueing control flags */
        cmd_entry->control_flags |= CF_SIMPLE_TAG;
        if (scsi_populate_tag_msg(cmd, tag))
                switch (tag[0]) {
                case MSG_HEAD_TAG:
                        cmd_entry->control_flags |= CF_HEAD_TAG;
                        break;
                case MSG_ORDERED_TAG:
                        cmd_entry->control_flags |= CF_ORDERED_TAG;
                        break;
                }

        qla4xxx_advance_req_ring_ptr(ha);
        qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
        wmb();

        srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

        /* update counters */
        srb->state = SRB_ACTIVE_STATE;
        srb->flags |= SRB_DMA_VALID;

        /* Track IOCB used */
        ha->iocb_cnt += req_cnt;
        srb->iocb_cnt = req_cnt;
        ha->req_q_count -= req_cnt;

        ha->isp_ops->queue_iocb(ha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_ERROR;
}

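/*
 * qla4xxx_send_passthru0 - builds a PASSTHRU0 IOCB for an iSCSI PDU and
 * places it on the request queue.
 * @task: iSCSI task for which the PDU is being sent.
 */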
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
        struct passthru0 *passthru_iocb;
        struct iscsi_session *sess = task->conn->session;
        struct ddb_entry *ddb_entry = sess->dd_data;
        struct scsi_qla_host *ha = ddb_entry->ha;
        struct ql4_task_data *task_data = task->dd_data;
        uint16_t ctrl_flags = 0;
        unsigned long flags;
        int ret = QLA_ERROR;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        task_data->iocb_req_cnt = 1;
        /* Put the IOCB on the request queue */
        if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
                goto queuing_error;

        passthru_iocb = (struct passthru0 *) ha->request_ptr;

        memset(passthru_iocb, 0, sizeof(struct passthru0));
        passthru_iocb->hdr.entryType = ET_PASSTHRU0;
        passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
        passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
        passthru_iocb->handle = task->itt;
        passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
        passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

        /* Setup the out & in DSDs */
        if (task_data->req_len) {
                memcpy((uint8_t *)task_data->req_buffer +
                       sizeof(struct iscsi_hdr), task->data, task->data_count);
                ctrl_flags |= PT_FLAG_SEND_BUFFER;
                passthru_iocb->out_dsd.base.addrLow =
                        cpu_to_le32(LSDW(task_data->req_dma));
                passthru_iocb->out_dsd.base.addrHigh =
                        cpu_to_le32(MSDW(task_data->req_dma));
                passthru_iocb->out_dsd.count =
                        cpu_to_le32(task->data_count +
                                    sizeof(struct iscsi_hdr));
        }
        if (task_data->resp_len) {
                passthru_iocb->in_dsd.base.addrLow =
                        cpu_to_le32(LSDW(task_data->resp_dma));
                passthru_iocb->in_dsd.base.addrHigh =
                        cpu_to_le32(MSDW(task_data->resp_dma));
                passthru_iocb->in_dsd.count =
                        cpu_to_le32(task_data->resp_len);
        }

        ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
        passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

        /* Update the request pointer */
        qla4xxx_advance_req_ring_ptr(ha);
        wmb();

        /* Track IOCB used */
        ha->iocb_cnt += task_data->iocb_req_cnt;
        ha->req_q_count -= task_data->iocb_req_cnt;
        ha->isp_ops->queue_iocb(ha);
        ret = QLA_SUCCESS;

queuing_error:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return ret;
}

static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
        struct mrb *mrb;

        mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
        if (!mrb)
                return mrb;

        mrb->ha = ha;
        return mrb;
}

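/*
 * qla4xxx_send_mbox_iocb - builds a mailbox IOCB from @in_mbox, assigns it a
 * free slot in the active MRB array, and queues it to the firmware.
 */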
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
                                  uint32_t *in_mbox)
{
        int rval = QLA_SUCCESS;
        uint32_t i;
        unsigned long flags;
        uint32_t index = 0;

        /* Acquire hardware specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Get pointer to the queue entry for the mailbox IOCB */
        rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
        if (rval != QLA_SUCCESS)
                goto exit_mbox_iocb;

        index = ha->mrb_index;
        /* get valid mrb index */
        for (i = 0; i < MAX_MRB; i++) {
                index++;
                if (index == MAX_MRB)
                        index = 1;
                if (ha->active_mrb_array[index] == NULL) {
                        ha->mrb_index = index;
                        break;
                }
        }

        mrb->iocb_cnt = 1;
        ha->active_mrb_array[index] = mrb;
        mrb->mbox->handle = index;
        mrb->mbox->hdr.entryType = ET_MBOX_CMD;
        mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
        memcpy(mrb->mbox->in_mbox, in_mbox, 32);
        mrb->mbox_cmd = in_mbox[0];
        wmb();

        ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
}

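/**
 * qla4xxx_ping_iocb - issues a PING mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: options for the ping mailbox command.
 * @payload_size: ping payload size.
 * @pid: ping id, saved in the MRB to match the completion.
 * @ipaddr: destination IP address (16 byte buffer).
 **/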
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
                      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
        uint32_t in_mbox[8];
        struct mrb *mrb = NULL;
        int rval = QLA_SUCCESS;

        memset(in_mbox, 0, sizeof(in_mbox));

        mrb = qla4xxx_get_new_mrb(ha);
        if (!mrb) {
                DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
                                  __func__));
                rval = QLA_ERROR;
                goto exit_ping;
        }

        in_mbox[0] = MBOX_CMD_PING;
        in_mbox[1] = options;
        memcpy(&in_mbox[2], &ipaddr[0], 4);
        memcpy(&in_mbox[3], &ipaddr[4], 4);
        memcpy(&in_mbox[4], &ipaddr[8], 4);
        memcpy(&in_mbox[5], &ipaddr[12], 4);
        in_mbox[6] = payload_size;

        mrb->pid = pid;
        rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);

        if (rval != QLA_SUCCESS)
                goto exit_ping;

        return rval;
exit_ping:
        kfree(mrb);
        return rval;
}