Linux Kernel 3.7.1
qla_mid.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

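/*
 * qla2x00_vp_stop_timer
 *	Stop the per-vport timer; only virtual ports (vp_idx != 0) with an
 *	active timer are affected.
 */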
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

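/*
 * qla24xx_allocate_vp_id
 *	Find a free slot in the host's vp_idx bitmap, claim it, and add the
 *	vport to the hardware's vp_list.  Returns the allocated id, or a
 *	value greater than max_npiv_vports if no slot is available.
 */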
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);

	qlt_update_vp_map(vha, SET_VP_IDX);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

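/*
 * qla24xx_deallocate_vp_id
 *	Wait for outstanding references to drain, unlink the vport from
 *	vp_list and release its vp_idx bit.
 */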
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it ensures
	 * no active vp_list traversal while the vport is removed from the
	 * queue).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		msleep(500);

		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

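/*
 * qla24xx_find_vhost_by_name
 *	Look up a virtual host on vp_list by WWPN.  Returns the matching
 *	scsi_qla_host or NULL.
 */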
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

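/*
 * qla24xx_disable_vp
 *	Log out and disable a virtual port: issue the disable/logo-all
 *	control command, drop the port from the vp target map and mark its
 *	fcports dead.
 */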
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->vport_slock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

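/*
 * qla24xx_enable_vp
 *	Enable a virtual port, provided the physical port is up and the
 *	topology is fabric (ISP_CFG_F).  Returns 0 on success, 1 on failure.
 */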
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

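/*
 * qla24xx_configure_vp
 *	Complete vport configuration: enable RSCN delivery (SCR) for the
 *	vport and bring it to the VP_ACTIVE / FC_VPORT_ACTIVE state.
 */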
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

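/*
 * qla2x00_alert_all_vps
 *	Fan out selected asynchronous mailbox events (LIP, loop up/down,
 *	port and RSCN updates, ...) from the physical port to every vport.
 */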
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

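/*
 * qla2x00_vp_abort_isp
 *	Handle ISP abort for a vport.  The physical port performs the actual
 *	recovery; the vport is treated as a loop-down and then re-enabled.
 */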
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first.  Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

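/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: complete configuration of newly acquired vports
 *	and handle fcport updates, relogins and loop resync.
 */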
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		ql_dbg(ql_dbg_dpc, vha, 0x4014,
		    "Configure VP scheduled.\n");
		qla24xx_configure_vp(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4015,
		    "Configure VP end.\n");
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

364 
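/*
 * qla2x00_do_dpc_all_vps
 *	Run DPC processing for every vport of the physical host.  Only the
 *	base port does the walk; vref_count pins each vport while it is
 *	being serviced.
 */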
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

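/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport-create request from the FC transport: NPIV support,
 *	fabric capability, unique WWPN and the vport count limit.
 */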
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-supported limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %d is bigger "
		    "than max_npiv_vports %d.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

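/*
 * qla24xx_create_vhost
 *	Allocate and initialize a new scsi_qla_host for an NPIV vport,
 *	assign its vp_idx and start its timer.  Returns NULL on failure.
 */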
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

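/*
 * qla25xx_free_req_que
 *	Free a request queue's DMA ring and release its queue id.
 */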
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

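/*
 * qla25xx_create_req_que
 *	Allocate a new request queue (ring memory and qid slot) and
 *	initialize it on the firmware.  Returns the queue id, or 0 on
 *	failure.
 */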
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

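/*
 * qla_do_work
 *	Workqueue handler that drains a response queue under hardware_lock.
 */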
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSIX not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for non-capable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}