Linux Kernel 3.7.1
be_main.c
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;
static unsigned int num_hba = 0;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");
#define beiscsi_disp_param(_name)\
ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0;	\
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)
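
/*
 * Each BEISCSI_RW_ATTR(_name, ...) use stamps out the four helpers above
 * for one attribute: beiscsi_<name>_disp/_change/_store/_init, a
 * beiscsi_<name> module parameter, and the dev_attr_beiscsi_<name> sysfs
 * node (S_IRUGO | S_IWUSR) that routes reads and writes through the disp
 * and store helpers with range checking.
 */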

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n");

struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	NULL,
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be "
			    "submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}

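/*
 * beiscsi_eh_device_reset - SCSI eh_device_reset_handler. Collects the ICD
 * of every in-flight task on the session's lead connection into
 * phba->inv_tbl, asks the firmware to invalidate them via
 * mgmt_invalidate_icds(), then hands off to libiscsi's
 * iscsi_eh_device_reset() for the actual LU reset.
 */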
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}

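/*
 * iscsi_boot_sysfs show helpers: each call formats one boot-session
 * attribute (target name, IP, port, CHAP credentials, ...) selected by the
 * 'type' code into the sysfs buffer, returning the byte count or -ENOSYS.
 */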
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

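/*
 * SCSI midlayer template: command submission and queue-depth changes are
 * delegated to libiscsi (iscsi_queuecommand, iscsi_change_queue_depth);
 * only the error handlers and slave_configure are driver-specific.
 */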
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

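/*
 * BAR layout, as used below: BAR 2 holds the CSR registers, BAR 4 the
 * doorbells, and the PCI config shadow lives in BAR 1 on BE_GEN2 parts,
 * BAR 0 otherwise; each mapping is saved as both a virtual address and a
 * bus address.
 */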
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

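/*
 * be_ctrl_init maps the BARs, then carves a 16-byte-aligned mailbox out of
 * an over-allocated DMA buffer (size + 16, aligned with PTR_ALIGN) so the
 * hardware always sees a properly aligned be_mcc_mailbox.
 */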
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				    - (phba->fw_config.iscsi_cid_count
				    + BE2_TMFS
				    + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				? 1024 : phba->params.num_eq_entries;
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : phba->params.num_eq_entries=%d\n",
		    phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}

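/*
 * EQ doorbell format written by hwi_ring_eq_db: the ring id occupies the
 * low bits, then single-bit rearm / clear-interrupt / event flags, with
 * the count of consumed entries in the num_popped field.
 */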
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/*
 * be_isr_mcc - ISR for the dedicated MCC (management) event queue when
 * MSI-X is in use: marks todo_mcc_cq and defers completion processing to
 * the driver workqueue.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/*
 * be_isr_msix - per-EQ ISR used when MSI-X is enabled: schedules blk-iopoll
 * on the event queue's CQ, or defers to the workqueue when iopoll is off.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}

/*
 * be_isr - legacy INTx ISR: one vector services both the MCC queue and the
 * I/O completion queue, so the EQE resource_id decides which path runs.
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

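/*
 * beiscsi_process_async_pdu hands an unsolicited PDU (NOP-In, async event,
 * reject, login/text response) to libiscsi via __iscsi_complete_pdu under
 * the session lock; login/text responses get their libiscsi ITT restored
 * first so the midlayer can match the response to its task.
 */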
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
			    (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_,io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/*
 * alloc_wrb_handle - allocate a wrb handle for the given connection; runs
 * under the session lock until submission to the chip. At least one handle
 * is kept in reserve so nxt_wrb_index can always chain to a valid entry.
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
						(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/*
 * free_wrb_handle - return a wrb handle to the connection's pool; runs
 * under the session lock.
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

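/*
 * be_complete_io decodes a solicited CQE: it recovers exp_cmdsn and the
 * command window (max_cmdsn = exp_cmdsn + window - 1), the iSCSI response,
 * SCSI status and flags, copies sense data on CHECK CONDITION, applies
 * over/underflow residuals, then completes the task back to libiscsi.
 */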
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	if (!task->sc) {
		if (io_task->scsi_cmnd)
			scsi_dma_unmap(io_task->scsi_cmnd);

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
				    ((psol->dw[offsetof(struct amap_sol_cqe,
				    i_cmd_wnd) / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock_bh(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock_bh(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
				     ((psol->dw[offsetof(struct amap_sol_cqe,
				     i_cmd_wnd) / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
		 WRB_TYPE_MASK) >> 28;

	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, psol);
		else
			be_complete_tmf(beiscsi_conn, task, psol);

		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    ((psol->dw[offsetof(struct amap_iscsi_wrb,
			    type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
			    ((psol->dw[offsetof(struct amap_sol_cqe,
			    cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}

static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		    header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

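/*
 * hwi_get_async_handle maps a default-PDU CQE back to the async_pdu_handle
 * that owns the buffer: it rebuilds the buffer's bus address from the CQE
 * (db_addr minus the data-placement length) and searches the busy list for
 * the matching entry.
 */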
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;

	phys_addr.u.a32.address_lo =
		pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
		((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
		pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		break;
	default:
		pbusy_list = NULL;
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unexpected code=%d\n",
			    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			    code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
			     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}

static unsigned int
hwi_update_async_writables(struct beiscsi_hba *phba,
			   struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : Duplicate notification received - index 0x%x!!\n",
			    cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}

static void hwi_free_async_msg(struct beiscsi_hba *phba,
			       unsigned int cri)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (pasync_handle->is_header) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
		} else {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
		}
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
}

static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}

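/*
 * hwi_post_async_buffers replenishes the default PDU header or data ring:
 * it moves free handles (in multiples of 8) onto the busy lists, writes
 * their addresses into the ring SGEs, and rings the RXULP doorbell with
 * the number of entries posted.
 */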
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	num_entries = pasync_ctx->num_entries;

	if (is_header) {
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
					host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}

static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(phba, pasync_ctx,
					   pasync_handle->is_header, cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset += buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					   phdr, hdr_len, pfirst_buffer,
					   offset);

	hwi_free_async_msg(phba, cri);
	return 0;
}

static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
					bytes_needed;

			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}

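/*
 * hwi_process_default_pdu_ring - CQ-side entry point for unsolicited PDUs:
 * resolves the CQE to its async handle, reconciles the endpoint read
 * pointer if the entry was not yet consumed, gathers header plus data into
 * a complete PDU, and reposts buffers to the ring.
 */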
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(phba, pasync_ctx,
					   pasync_handle->is_header, cq_index);

	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

1873 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1874 {
1875  struct be_queue_info *mcc_cq;
1876  struct be_mcc_compl *mcc_compl;
1877  unsigned int num_processed = 0;
1878 
1879  mcc_cq = &phba->ctrl.mcc_obj.cq;
1880  mcc_compl = queue_tail_node(mcc_cq);
1881  mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1882  while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1883 
1884  if (num_processed >= 32) {
1885  hwi_ring_cq_db(phba, mcc_cq->id,
1886  num_processed, 0, 0);
1887  num_processed = 0;
1888  }
1889  if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1890  /* Interpret flags as an async trailer */
1891  if (is_link_state_evt(mcc_compl->flags))
1892  /* Interpret compl as an async link evt */
1893  beiscsi_async_link_state_process(phba,
1894  (struct be_async_event_link_state *) mcc_compl);
1895  else
1896  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MGMT,
1897  "BM_%d : Unsupported Async Event, flags"
1898  " = 0x%08x\n",
1899  mcc_compl->flags);
1900  } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1901  be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1902  atomic_dec(&phba->ctrl.mcc_obj.q.used);
1903  }
1904 
1905  mcc_compl->flags = 0;
1906  queue_tail_inc(mcc_cq);
1907  mcc_compl = queue_tail_node(mcc_cq);
1908  mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1909  num_processed++;
1910  }
1911 
1912  if (num_processed > 0)
1913  hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1914 
1915 }
1916 
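/*
 * Main I/O completion loop: walk the valid CQEs, decode the CID and
 * completion code, and dispatch to the command, driver-message or
 * unsolicited-PDU handlers; connection-killing error codes tear the
 * connection down through iscsi_conn_failure().
 */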
1917 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1918 {
1919  struct be_queue_info *cq;
1920  struct sol_cqe *sol;
1921  struct dmsg_cqe *dmsg;
1922  unsigned int num_processed = 0;
1923  unsigned int tot_nump = 0;
1924  unsigned short code = 0, cid = 0;
1925  struct beiscsi_conn *beiscsi_conn;
1926  struct beiscsi_endpoint *beiscsi_ep;
1927  struct iscsi_endpoint *ep;
1928  struct beiscsi_hba *phba;
1929 
1930  cq = pbe_eq->cq;
1931  sol = queue_tail_node(cq);
1932  phba = pbe_eq->phba;
1933 
1934  while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1935  CQE_VALID_MASK) {
1936  be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1937 
1938  cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
1939  CQE_CID_MASK) >> 6);
1940  code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
1941  CQE_CODE_MASK);
1942  ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
1943 
1944  beiscsi_ep = ep->dd_data;
1945  beiscsi_conn = beiscsi_ep->conn;
1946 
1947  if (num_processed >= 32) {
1948  hwi_ring_cq_db(phba, cq->id,
1949  num_processed, 0, 0);
1950  tot_nump += num_processed;
1951  num_processed = 0;
1952  }
1953 
1954  switch (code) {
1955  case SOL_CMD_COMPLETE:
1956  hwi_complete_cmd(beiscsi_conn, phba, sol);
1957  break;
1958  case DRIVERMSG_NOTIFY:
1959  beiscsi_log(phba, KERN_INFO,
1960  BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1961  "BM_%d : Received DRIVERMSG_NOTIFY\n");
1962 
1963  dmsg = (struct dmsg_cqe *)sol;
1964  hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1965  break;
1966  case UNSOL_HDR_NOTIFY:
1967  beiscsi_log(phba, KERN_INFO,
1968  BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1969  "BM_%d : Received UNSOL_HDR_NOTIFY\n");
1970 
1971  hwi_process_default_pdu_ring(beiscsi_conn, phba,
1972  (struct i_t_dpdu_cqe *)sol);
1973  break;
1974  case UNSOL_DATA_NOTIFY:
1975  beiscsi_log(phba, KERN_INFO,
1976  BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1977  "BM_%d : Received UNSOL_DATA_NOTIFY\n");
1978 
1979  hwi_process_default_pdu_ring(beiscsi_conn, phba,
1980  (struct i_t_dpdu_cqe *)sol);
1981  break;
1982  case CXN_INVALIDATE_INDEX_NOTIFY:
1983  case CMD_INVALIDATED_NOTIFY:
1984  case CXN_INVALIDATE_NOTIFY:
1985  beiscsi_log(phba, KERN_ERR,
1986  BEISCSI_LOG_IO,
1987  "BM_%d : Ignoring CQ Error notification for"
1988  " cmd/cxn invalidate\n");
1989  break;
1990  case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1991  case CMD_KILLED_INVALID_STATSN_RCVD:
1992  case CMD_KILLED_INVALID_R2T_RCVD:
1993  case CMD_CXN_KILLED_LUN_INVALID:
1994  case CMD_CXN_KILLED_ICD_INVALID:
1995  case CMD_CXN_KILLED_ITT_INVALID:
1996  case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1997  case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1998  beiscsi_log(phba, KERN_ERR,
1999  BEISCSI_LOG_CONFIG,
2000  "BM_%d : CQ Error notification for cmd.. "
2001  "code %d cid 0x%x\n", code, cid);
2002  break;
2003  case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
2004  beiscsi_log(phba, KERN_ERR,
2005  BEISCSI_LOG_IO,
2006  "BM_%d : Digest error on def pdu ring,"
2007  " dropping..\n");
2008  hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2009  (struct i_t_dpdu_cqe *) sol);
2010  break;
2011  case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2012  case CXN_KILLED_BURST_LEN_MISMATCH:
2013  case CXN_KILLED_AHS_RCVD:
2014  case CXN_KILLED_HDR_DIGEST_ERR:
2015  case CXN_KILLED_UNKNOWN_HDR:
2016  case CXN_KILLED_STALE_ITT_TTT_RCVD:
2017  case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2018  case CXN_KILLED_TIMED_OUT:
2019  case CXN_KILLED_FIN_RCVD:
2020  case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2021  case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2022  case CXN_KILLED_OVER_RUN_RESIDUAL:
2023  case CXN_KILLED_UNDER_RUN_RESIDUAL:
2024  case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2025  beiscsi_log(phba, KERN_ERR,
2026  BEISCSI_LOG_IO,
2027  "BM_%d : CQ Error %d, reset CID 0x%x...\n",
2028  code, cid);
2029  if (beiscsi_conn)
2030  iscsi_conn_failure(beiscsi_conn->conn,
2031  ISCSI_ERR_CONN_FAILED);
2032  break;
2033  case CXN_KILLED_RST_SENT:
2034  case CXN_KILLED_RST_RCVD:
2035  beiscsi_log(phba, KERN_ERR,
2036  BEISCSI_LOG_IO,
2037  "BM_%d : CQ Error %d, reset "
2038  "received/sent on CID 0x%x...\n",
2039  code, cid);
2040  if (beiscsi_conn)
2041  iscsi_conn_failure(beiscsi_conn->conn,
2042  ISCSI_ERR_CONN_FAILED);
2043  break;
2044  default:
2045  beiscsi_log(phba, KERN_ERR,
2046  BEISCSI_LOG_IO,
2047  "BM_%d : CQ Error Invalid code= %d "
2048  "received on CID 0x%x...\n",
2049  code, cid);
2050  break;
2051  }
2052 
2053  AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2054  queue_tail_inc(cq);
2055  sol = queue_tail_node(cq);
2056  num_processed++;
2057  }
2058 
2059  if (num_processed > 0) {
2060  tot_nump += num_processed;
2061  hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
2062  }
2063  return tot_nump;
2064 }
2065 
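/*
 * Workqueue handler: service whatever the interrupt path flagged as
 * pending (todo_mcc_cq/todo_cq), using the extra EQ reserved for the
 * MCC when MSI-X is enabled.
 */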
2066 static void beiscsi_process_all_cqs(struct work_struct *work)
2067 {
2068  unsigned long flags;
2069  struct hwi_controller *phwi_ctrlr;
2070  struct hwi_context_memory *phwi_context;
2071  struct be_eq_obj *pbe_eq;
2072  struct beiscsi_hba *phba =
2073  container_of(work, struct beiscsi_hba, work_cqs);
2074 
2075  phwi_ctrlr = phba->phwi_ctrlr;
2076  phwi_context = phwi_ctrlr->phwi_ctxt;
2077  if (phba->msix_enabled)
2078  pbe_eq = &phwi_context->be_eq[phba->num_cpus];
2079  else
2080  pbe_eq = &phwi_context->be_eq[0];
2081 
2082  if (phba->todo_mcc_cq) {
2083  spin_lock_irqsave(&phba->isr_lock, flags);
2084  phba->todo_mcc_cq = 0;
2085  spin_unlock_irqrestore(&phba->isr_lock, flags);
2086  beiscsi_process_mcc_isr(phba);
2087  }
2088 
2089  if (phba->todo_cq) {
2090  spin_lock_irqsave(&phba->isr_lock, flags);
2091  phba->todo_cq = 0;
2092  spin_unlock_irqrestore(&phba->isr_lock, flags);
2093  beiscsi_process_cq(pbe_eq);
2094  }
2095 }
2096 
2097 static int be_iopoll(struct blk_iopoll *iop, int budget)
2098 {
2099  unsigned int ret;
2100  struct beiscsi_hba *phba;
2101  struct be_eq_obj *pbe_eq;
2102 
2103  pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2104  ret = beiscsi_process_cq(pbe_eq);
2105  if (ret < budget) {
2106  phba = pbe_eq->phba;
2107  blk_iopoll_complete(iop);
2108  beiscsi_log(phba, KERN_INFO,
2109  BEISCSI_LOG_IO,
2110  "BM_%d : rearm pbe_eq->q.id =%d\n",
2111  pbe_eq->q.id);
2112  hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2113  }
2114  return ret;
2115 }
2116 
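/*
 * Lay out the scatter-gather view of an I/O task: the first two data
 * fragments are written inline into the WRB (sge0/sge1), while the SGL
 * page gets the BHS fragment first and then every data fragment with a
 * running offset, the final entry being marked last_sge.
 */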
2117 static void
2118 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2119  unsigned int num_sg, struct beiscsi_io_task *io_task)
2120 {
2121  struct iscsi_sge *psgl;
2122  unsigned int sg_len, index;
2123  unsigned int sge_len = 0;
2124  unsigned long long addr;
2125  struct scatterlist *l_sg;
2126  unsigned int offset;
2127 
2128  AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2129  io_task->bhs_pa.u.a32.address_lo);
2130  AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2131  io_task->bhs_pa.u.a32.address_hi);
2132 
2133  l_sg = sg;
2134  for (index = 0; (index < num_sg) && (index < 2); index++,
2135  sg = sg_next(sg)) {
2136  if (index == 0) {
2137  sg_len = sg_dma_len(sg);
2138  addr = (u64) sg_dma_address(sg);
2139  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2140  ((u32)(addr & 0xFFFFFFFF)));
2141  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2142  ((u32)(addr >> 32)));
2143  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2144  sg_len);
2145  sge_len = sg_len;
2146  } else {
2147  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2148  pwrb, sge_len);
2149  sg_len = sg_dma_len(sg);
2150  addr = (u64) sg_dma_address(sg);
2151  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2152  ((u32)(addr & 0xFFFFFFFF)));
2153  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2154  ((u32)(addr >> 32)));
2155  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2156  sg_len);
2157  }
2158  }
2159  psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2160  memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2161 
2162  AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2163 
2164  AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2165  io_task->bhs_pa.u.a32.address_hi);
2166  AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2167  io_task->bhs_pa.u.a32.address_lo);
2168 
2169  if (num_sg == 1) {
2170  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2171  1);
2172  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2173  0);
2174  } else if (num_sg == 2) {
2175  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2176  0);
2177  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2178  1);
2179  } else {
2180  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2181  0);
2182  AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2183  0);
2184  }
2185  sg = l_sg;
2186  psgl++;
2187  psgl++;
2188  offset = 0;
2189  for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2190  sg_len = sg_dma_len(sg);
2191  addr = (u64) sg_dma_address(sg);
2192  AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2193  (addr & 0xFFFFFFFF));
2194  AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2195  (addr >> 32));
2196  AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2197  AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2198  AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2199  offset += sg_len;
2200  }
2201  psgl--;
2202  AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2203 }
2204 
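/*
 * Set up the WRB and SGL for a non-I/O (mgmt/login) task whose payload,
 * if any, is a single flat buffer mapped with pci_map_single().
 */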
2205 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2206 {
2207  struct iscsi_sge *psgl;
2208  unsigned long long addr;
2209  struct beiscsi_io_task *io_task = task->dd_data;
2210  struct beiscsi_conn *beiscsi_conn = io_task->conn;
2211  struct beiscsi_hba *phba = beiscsi_conn->phba;
2212 
2213  io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2214  AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2215  io_task->bhs_pa.u.a32.address_lo);
2216  AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2217  io_task->bhs_pa.u.a32.address_hi);
2218 
2219  if (task->data) {
2220  if (task->data_count) {
2221  AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
2222  addr = (u64) pci_map_single(phba->pcidev,
2223  task->data,
2224  task->data_count, 1);
2225  } else {
2226  AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2227  addr = 0;
2228  }
2229  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2230  ((u32)(addr & 0xFFFFFFFF)));
2231  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2232  ((u32)(addr >> 32)));
2233  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2234  task->data_count);
2235 
2236  AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2237  } else {
2238  AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2239  addr = 0;
2240  }
2241 
2242  psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2243 
2244  AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2245 
2246  AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2247  io_task->bhs_pa.u.a32.address_hi);
2248  AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2249  io_task->bhs_pa.u.a32.address_lo);
2250  if (task->data) {
2251  psgl++;
2252  AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2253  AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2254  AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2255  AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2256  AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2257  AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2258 
2259  psgl++;
2260  if (task->data) {
2261  AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2262  ((u32)(addr & 0xFFFFFFFF)));
2263  AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2264  ((u32)(addr >> 32)));
2265  }
2266  AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2267  }
2268  AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2269 }
2270 
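/*
 * Size every memory region the driver will need (mem_req[]): WRBs,
 * SGLs, and the async PDU buffers, rings and handles, before anything
 * is allocated.
 */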
2271 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2272 {
2273  unsigned int num_cq_pages, num_async_pdu_buf_pages;
2274  unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2275  unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2276 
2277  num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2278  sizeof(struct sol_cqe));
2279  num_async_pdu_buf_pages =
2280  PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2281  phba->params.defpdu_hdr_sz);
2282  num_async_pdu_buf_sgl_pages =
2283  PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2284  sizeof(struct phys_addr));
2285  num_async_pdu_data_pages =
2286  PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2287  phba->params.defpdu_data_sz);
2288  num_async_pdu_data_sgl_pages =
2289  PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2290  sizeof(struct phys_addr));
2291 
2292  phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2293 
2294  phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2295  BE_ISCSI_PDU_HEADER_SIZE;
2296  phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2297  sizeof(struct hwi_context_memory);
2298 
2299 
2300  phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2301  * (phba->params.wrbs_per_cxn)
2302  * phba->params.cxns_per_ctrl;
2303  wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2304  (phba->params.wrbs_per_cxn);
2305  phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2306  phba->params.cxns_per_ctrl);
2307 
2308  phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2309  phba->params.icds_per_ctrl;
2310  phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2311  phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2312 
2313  phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2314  num_async_pdu_buf_pages * PAGE_SIZE;
2315  phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2316  num_async_pdu_data_pages * PAGE_SIZE;
2317  phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2318  num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2319  phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2320  num_async_pdu_data_sgl_pages * PAGE_SIZE;
2321  phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2322  phba->params.asyncpdus_per_ctrl *
2323  sizeof(struct async_pdu_handle);
2324  phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2325  phba->params.asyncpdus_per_ctrl *
2326  sizeof(struct async_pdu_handle);
2327  phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2328  sizeof(struct hwi_async_pdu_context) +
2329  (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2330 }
2331 
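/*
 * Allocate each region sized by beiscsi_find_mem_req() as a chain of
 * physically contiguous DMA fragments, shrinking the request (to the
 * next lower power of two, or by half) on failure until BE_MIN_MEM_SIZE
 * is reached; partially built regions are unwound on the free_mem path.
 */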
2332 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2333 {
2334  struct be_mem_descriptor *mem_descr;
2335  dma_addr_t bus_add;
2336  struct mem_array *mem_arr, *mem_arr_orig;
2337  unsigned int i, j, alloc_size, curr_alloc_size;
2338 
2339  phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2340  if (!phba->phwi_ctrlr)
2341  return -ENOMEM;
2342 
2343  phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2344  GFP_KERNEL);
2345  if (!phba->init_mem) {
2346  kfree(phba->phwi_ctrlr);
2347  return -ENOMEM;
2348  }
2349 
2350  mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2351  GFP_KERNEL);
2352  if (!mem_arr_orig) {
2353  kfree(phba->init_mem);
2354  kfree(phba->phwi_ctrlr);
2355  return -ENOMEM;
2356  }
2357 
2358  mem_descr = phba->init_mem;
2359  for (i = 0; i < SE_MEM_MAX; i++) {
2360  j = 0;
2361  mem_arr = mem_arr_orig;
2362  alloc_size = phba->mem_req[i];
2363  memset(mem_arr, 0, sizeof(struct mem_array) *
2364  BEISCSI_MAX_FRAGS_INIT);
2365  curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2366  do {
2367  mem_arr->virtual_address = pci_alloc_consistent(
2368  phba->pcidev,
2369  curr_alloc_size,
2370  &bus_add);
2371  if (!mem_arr->virtual_address) {
2372  if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2373  goto free_mem;
2374  if (curr_alloc_size -
2375  rounddown_pow_of_two(curr_alloc_size))
2376  curr_alloc_size = rounddown_pow_of_two
2377  (curr_alloc_size);
2378  else
2379  curr_alloc_size = curr_alloc_size / 2;
2380  } else {
2381  mem_arr->bus_address.u.
2382  a64.address = (__u64) bus_add;
2383  mem_arr->size = curr_alloc_size;
2384  alloc_size -= curr_alloc_size;
2385  curr_alloc_size = min(be_max_phys_size *
2386  1024, alloc_size);
2387  j++;
2388  mem_arr++;
2389  }
2390  } while (alloc_size);
2391  mem_descr->num_elements = j;
2392  mem_descr->size_in_bytes = phba->mem_req[i];
2393  mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2394  GFP_KERNEL);
2395  if (!mem_descr->mem_array)
2396  goto free_mem;
2397 
2398  memcpy(mem_descr->mem_array, mem_arr_orig,
2399  sizeof(struct mem_array) * j);
2400  mem_descr++;
2401  }
2402  kfree(mem_arr_orig);
2403  return 0;
2404 free_mem:
2405  mem_descr->num_elements = j;
2406  while ((i) || (j)) {
2407  for (j = mem_descr->num_elements; j > 0; j--) {
2408  pci_free_consistent(phba->pcidev,
2409  mem_descr->mem_array[j - 1].size,
2410  mem_descr->mem_array[j - 1].
2411  virtual_address,
2412  (unsigned long)mem_descr->
2413  mem_array[j - 1].
2414  bus_address.u.a64.address);
2415  }
2416  if (i) {
2417  i--;
2418  kfree(mem_descr->mem_array);
2419  mem_descr--;
2420  }
2421  }
2422  kfree(mem_arr_orig);
2423  kfree(phba->init_mem);
2424  kfree(phba->phwi_ctrlr);
2425  return -ENOMEM;
2426 }
2427 
2428 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2429 {
2430  beiscsi_find_mem_req(phba);
2431  return beiscsi_alloc_mem(phba);
2432 }
2433 
2434 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2435 {
2436  struct pdu_data_out *pdata_out;
2437  struct pdu_nop_out *pnop_out;
2438  struct be_mem_descriptor *mem_descr;
2439 
2440  mem_descr = phba->init_mem;
2441  mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2442  pdata_out =
2443  (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2444  memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2445 
2446  AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2447  IIOC_SCSI_DATA);
2448 
2449  pnop_out =
2450  (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2451  virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2452 
2453  memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2454  AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2455  AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2456  AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2457 }
2458 
2459 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2460 {
2461  struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2462  struct wrb_handle *pwrb_handle = NULL;
2463  struct hwi_controller *phwi_ctrlr;
2464  struct hwi_wrb_context *pwrb_context;
2465  struct iscsi_wrb *pwrb = NULL;
2466  unsigned int num_cxn_wrbh = 0;
2467  unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2468 
2469  mem_descr_wrbh = phba->init_mem;
2470  mem_descr_wrbh += HWI_MEM_WRBH;
2471 
2472  mem_descr_wrb = phba->init_mem;
2473  mem_descr_wrb += HWI_MEM_WRB;
2474  phwi_ctrlr = phba->phwi_ctrlr;
2475 
2476  for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2477  pwrb_context = &phwi_ctrlr->wrb_context[index];
2478  pwrb_context->pwrb_handle_base =
2479  kzalloc(sizeof(struct wrb_handle *) *
2480  phba->params.wrbs_per_cxn, GFP_KERNEL);
2481  if (!pwrb_context->pwrb_handle_base) {
2482  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2483  "BM_%d : Mem Alloc Failed. Failing to load\n");
2484  goto init_wrb_hndl_failed;
2485  }
2486  pwrb_context->pwrb_handle_basestd =
2487  kzalloc(sizeof(struct wrb_handle *) *
2488  phba->params.wrbs_per_cxn, GFP_KERNEL);
2489  if (!pwrb_context->pwrb_handle_basestd) {
2490  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2491  "BM_%d : Mem Alloc Failed. Failing to load\n");
2492  goto init_wrb_hndl_failed;
2493  }
2494  if (!num_cxn_wrbh) {
2495  pwrb_handle =
2496  mem_descr_wrbh->mem_array[idx].virtual_address;
2497  num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2498  ((sizeof(struct wrb_handle)) *
2499  phba->params.wrbs_per_cxn));
2500  idx++;
2501  }
2502  pwrb_context->alloc_index = 0;
2503  pwrb_context->wrb_handles_available = 0;
2504  pwrb_context->free_index = 0;
2505 
2506  if (num_cxn_wrbh) {
2507  for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2508  pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2509  pwrb_context->pwrb_handle_basestd[j] =
2510  pwrb_handle;
2511  pwrb_context->wrb_handles_available++;
2512  pwrb_handle->wrb_index = j;
2513  pwrb_handle++;
2514  }
2515  num_cxn_wrbh--;
2516  }
2517  }
2518  idx = 0;
2519  for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2520  pwrb_context = &phwi_ctrlr->wrb_context[index];
2521  if (!num_cxn_wrb) {
2522  pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2523  num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2524  ((sizeof(struct iscsi_wrb) *
2525  phba->params.wrbs_per_cxn));
2526  idx++;
2527  }
2528 
2529  if (num_cxn_wrb) {
2530  for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2531  pwrb_handle = pwrb_context->pwrb_handle_base[j];
2532  pwrb_handle->pwrb = pwrb;
2533  pwrb++;
2534  }
2535  num_cxn_wrb--;
2536  }
2537  }
2538  return 0;
2539 init_wrb_hndl_failed:
2540  for (j = index; j > 0; j--) {
2541  pwrb_context = &phwi_ctrlr->wrb_context[j];
2542  kfree(pwrb_context->pwrb_handle_base);
2543  kfree(pwrb_context->pwrb_handle_basestd);
2544  }
2545  return -ENOMEM;
2546 }
2547 
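/*
 * Carve the async PDU context out of the preallocated regions: wire up
 * the header and data buffer rings, give each per-PDU handle its
 * virtual and bus address, and seed the free lists.
 */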
2548 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2549 {
2550  struct hwi_controller *phwi_ctrlr;
2551  struct hba_parameters *p = &phba->params;
2552  struct hwi_async_pdu_context *pasync_ctx;
2553  struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2554  unsigned int index, idx, num_per_mem, num_async_data;
2555  struct be_mem_descriptor *mem_descr;
2556 
2557  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2558  mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2559 
2560  phwi_ctrlr = phba->phwi_ctrlr;
2561  phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2562  mem_descr->mem_array[0].virtual_address;
2563  pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2564  memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2565 
2566  pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2567  pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2568 
2569  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2570  mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2571  if (mem_descr->mem_array[0].virtual_address) {
2572  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2573  "BM_%d : hwi_init_async_pdu_ctx"
2574  " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2575  mem_descr->mem_array[0].virtual_address);
2576  } else
2577  beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2578  "BM_%d : No Virtual address\n");
2579 
2580  pasync_ctx->async_header.va_base =
2581  mem_descr->mem_array[0].virtual_address;
2582 
2583  pasync_ctx->async_header.pa_base.u.a64.address =
2584  mem_descr->mem_array[0].bus_address.u.a64.address;
2585 
2586  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2587  mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2588  if (mem_descr->mem_array[0].virtual_address) {
2589  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2590  "BM_%d : hwi_init_async_pdu_ctx"
2591  " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2592  mem_descr->mem_array[0].virtual_address);
2593  } else
2594  beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2595  "BM_%d : No Virtual address\n");
2596 
2597  pasync_ctx->async_header.ring_base =
2598  mem_descr->mem_array[0].virtual_address;
2599 
2600  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2601  mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2602  if (mem_descr->mem_array[0].virtual_address) {
2603  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2604  "BM_%d : hwi_init_async_pdu_ctx"
2605  " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2606  mem_descr->mem_array[0].virtual_address);
2607  } else
2608  beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2609  "BM_%d : No Virtual address\n");
2610 
2611  pasync_ctx->async_header.handle_base =
2612  mem_descr->mem_array[0].virtual_address;
2613  pasync_ctx->async_header.writables = 0;
2614  INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2615 
2616 
2617  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2618  mem_descr += HWI_MEM_ASYNC_DATA_RING;
2619  if (mem_descr->mem_array[0].virtual_address) {
2620  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2621  "BM_%d : hwi_init_async_pdu_ctx"
2622  " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2623  mem_descr->mem_array[0].virtual_address);
2624  } else
2625  beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2626  "BM_%d : No Virtual address\n");
2627 
2628  pasync_ctx->async_data.ring_base =
2629  mem_descr->mem_array[0].virtual_address;
2630 
2631  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2632  mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2633  if (!mem_descr->mem_array[0].virtual_address)
2634  beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2635  "BM_%d : No Virtual address\n");
2636 
2637  pasync_ctx->async_data.handle_base =
2638  mem_descr->mem_array[0].virtual_address;
2639  pasync_ctx->async_data.writables = 0;
2640  INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2641 
2642  pasync_header_h =
2643  (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2644  pasync_data_h =
2645  (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2646 
2647  mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2648  mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2649  if (mem_descr->mem_array[0].virtual_address) {
2650  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2651  "BM_%d : hwi_init_async_pdu_ctx"
2652  " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
2653  mem_descr->mem_array[0].virtual_address);
2654  } else
2655  beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2656  "BM_%d : No Virtual address\n");
2657 
2658  idx = 0;
2659  pasync_ctx->async_data.va_base =
2660  mem_descr->mem_array[idx].virtual_address;
2661  pasync_ctx->async_data.pa_base.u.a64.address =
2662  mem_descr->mem_array[idx].bus_address.u.a64.address;
2663 
2664  num_async_data = ((mem_descr->mem_array[idx].size) /
2665  phba->params.defpdu_data_sz);
2666  num_per_mem = 0;
2667 
2668  for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2669  pasync_header_h->cri = -1;
2670  pasync_header_h->index = (char)index;
2671  INIT_LIST_HEAD(&pasync_header_h->link);
2672  pasync_header_h->pbuffer =
2673  (void *)((unsigned long)
2674  (pasync_ctx->async_header.va_base) +
2675  (p->defpdu_hdr_sz * index));
2676 
2677  pasync_header_h->pa.u.a64.address =
2678  pasync_ctx->async_header.pa_base.u.a64.address +
2679  (p->defpdu_hdr_sz * index);
2680 
2681  list_add_tail(&pasync_header_h->link,
2682  &pasync_ctx->async_header.free_list);
2683  pasync_header_h++;
2684  pasync_ctx->async_header.free_entries++;
2685  pasync_ctx->async_header.writables++;
2686 
2687  INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2688  INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2689  header_busy_list);
2690  pasync_data_h->cri = -1;
2691  pasync_data_h->index = (char)index;
2692  INIT_LIST_HEAD(&pasync_data_h->link);
2693 
2694  if (!num_async_data) {
2695  num_per_mem = 0;
2696  idx++;
2697  pasync_ctx->async_data.va_base =
2698  mem_descr->mem_array[idx].virtual_address;
2699  pasync_ctx->async_data.pa_base.u.a64.address =
2700  mem_descr->mem_array[idx].
2701  bus_address.u.a64.address;
2702 
2703  num_async_data = ((mem_descr->mem_array[idx].size) /
2704  phba->params.defpdu_data_sz);
2705  }
2706  pasync_data_h->pbuffer =
2707  (void *)((unsigned long)
2708  (pasync_ctx->async_data.va_base) +
2709  (p->defpdu_data_sz * num_per_mem));
2710 
2711  pasync_data_h->pa.u.a64.address =
2712  pasync_ctx->async_data.pa_base.u.a64.address +
2713  (p->defpdu_data_sz * num_per_mem);
2714  num_per_mem++;
2715  num_async_data--;
2716 
2717  list_add_tail(&pasync_data_h->link,
2718  &pasync_ctx->async_data.free_list);
2719  pasync_data_h++;
2720  pasync_ctx->async_data.free_entries++;
2721  pasync_ctx->async_data.writables++;
2722 
2723  INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2724  }
2725 
2726  pasync_ctx->async_header.host_write_ptr = 0;
2727  pasync_ctx->async_header.ep_read_ptr = -1;
2728  pasync_ctx->async_data.host_write_ptr = 0;
2729  pasync_ctx->async_data.ep_read_ptr = -1;
2730 }
2731 
2732 static int
2733 be_sgl_create_contiguous(void *virtual_address,
2734  u64 physical_address, u32 length,
2735  struct be_dma_mem *sgl)
2736 {
2737  WARN_ON(!virtual_address);
2738  WARN_ON(!physical_address);
2739  WARN_ON(length == 0);
2740  WARN_ON(!sgl);
2741 
2742  sgl->va = virtual_address;
2743  sgl->dma = (unsigned long)physical_address;
2744  sgl->size = length;
2745 
2746  return 0;
2747 }
2748 
2749 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2750 {
2751  memset(sgl, 0, sizeof(*sgl));
2752 }
2753 
2754 static void
2755 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2756  struct mem_array *pmem, struct be_dma_mem *sgl)
2757 {
2758  if (sgl->va)
2759  be_sgl_destroy_contiguous(sgl);
2760 
2761  be_sgl_create_contiguous(pmem->virtual_address,
2762  pmem->bus_address.u.a64.address,
2763  pmem->size, sgl);
2764 }
2765 
2766 static void
2767 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2768  struct mem_array *pmem, struct be_dma_mem *sgl)
2769 {
2770  if (sgl->va)
2771  be_sgl_destroy_contiguous(sgl);
2772 
2773  be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2774  pmem->bus_address.u.a64.address,
2775  pmem->size, sgl);
2776 }
2777 
2778 static int be_fill_queue(struct be_queue_info *q,
2779  u16 len, u16 entry_size, void *vaddress)
2780 {
2781  struct be_dma_mem *mem = &q->dma_mem;
2782 
2783  memset(q, 0, sizeof(*q));
2784  q->len = len;
2785  q->entry_size = entry_size;
2786  mem->size = len * entry_size;
2787  mem->va = vaddress;
2788  if (!mem->va)
2789  return -ENOMEM;
2790  memset(mem->va, 0, mem->size);
2791  return 0;
2792 }
2793 
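/*
 * Create one event queue per CPU, plus one for the MCC when MSI-X is
 * enabled; the error path frees whatever had been allocated so far.
 */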
2794 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2795  struct hwi_context_memory *phwi_context)
2796 {
2797  unsigned int i, num_eq_pages;
2798  int ret = 0, eq_for_mcc;
2799  struct be_queue_info *eq;
2800  struct be_dma_mem *mem;
2801  void *eq_vaddress;
2802  dma_addr_t paddr;
2803 
2804  num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2805  sizeof(struct be_eq_entry));
2806 
2807  if (phba->msix_enabled)
2808  eq_for_mcc = 1;
2809  else
2810  eq_for_mcc = 0;
2811  for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2812  eq = &phwi_context->be_eq[i].q;
2813  mem = &eq->dma_mem;
2814  phwi_context->be_eq[i].phba = phba;
2815  eq_vaddress = pci_alloc_consistent(phba->pcidev,
2816  num_eq_pages * PAGE_SIZE,
2817  &paddr);
2818  if (!eq_vaddress)
2819  goto create_eq_error;
2820 
2821  mem->va = eq_vaddress;
2822  ret = be_fill_queue(eq, phba->params.num_eq_entries,
2823  sizeof(struct be_eq_entry), eq_vaddress);
2824  if (ret) {
2825  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2826  "BM_%d : be_fill_queue Failed for EQ\n");
2827  goto create_eq_error;
2828  }
2829 
2830  mem->dma = paddr;
2831  ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2832  phwi_context->cur_eqd);
2833  if (ret) {
2834  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2835  "BM_%d : beiscsi_cmd_eq_create"
2836  " Failed for EQ\n");
2837  goto create_eq_error;
2838  }
2839 
2841  "BM_%d : eqid = %d\n",
2842  phwi_context->be_eq[i].q.id);
2843  }
2844  return 0;
2845 create_eq_error:
2846  for (i = 0; i < (phba->num_cpus + 1); i++) {
2847  eq = &phwi_context->be_eq[i].q;
2848  mem = &eq->dma_mem;
2849  if (mem->va)
2850  pci_free_consistent(phba->pcidev, num_eq_pages
2851  * PAGE_SIZE,
2852  mem->va, mem->dma);
2853  }
2854  return ret;
2855 }
2856 
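/*
 * Create one completion queue per CPU and bind each to its event queue
 * through the be_eq_obj so the poll path can find it.
 */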
2857 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2858  struct hwi_context_memory *phwi_context)
2859 {
2860  unsigned int i, num_cq_pages;
2861  int ret = 0;
2862  struct be_queue_info *cq, *eq;
2863  struct be_dma_mem *mem;
2864  struct be_eq_obj *pbe_eq;
2865  void *cq_vaddress;
2866  dma_addr_t paddr;
2867 
2868  num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2869  sizeof(struct sol_cqe));
2870 
2871  for (i = 0; i < phba->num_cpus; i++) {
2872  cq = &phwi_context->be_cq[i];
2873  eq = &phwi_context->be_eq[i].q;
2874  pbe_eq = &phwi_context->be_eq[i];
2875  pbe_eq->cq = cq;
2876  pbe_eq->phba = phba;
2877  mem = &cq->dma_mem;
2878  cq_vaddress = pci_alloc_consistent(phba->pcidev,
2879  num_cq_pages * PAGE_SIZE,
2880  &paddr);
2881  if (!cq_vaddress)
2882  goto create_cq_error;
2883  ret = be_fill_queue(cq, phba->params.num_cq_entries,
2884  sizeof(struct sol_cqe), cq_vaddress);
2885  if (ret) {
2886  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2887  "BM_%d : be_fill_queue Failed "
2888  "for ISCSI CQ\n");
2889  goto create_cq_error;
2890  }
2891 
2892  mem->dma = paddr;
2893  ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2894  false, 0);
2895  if (ret) {
2896  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2897  "BM_%d : beiscsi_cmd_cq_create"
2898  " Failed for ISCSI CQ\n");
2899  goto create_cq_error;
2900  }
2901  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2902  "BM_%d : iscsi cq_id is %d for eq_id %d\n"
2903  "iSCSI CQ CREATED\n", cq->id, eq->id);
2904  }
2905  return 0;
2906 
2907 create_cq_error:
2908  for (i = 0; i < phba->num_cpus; i++) {
2909  cq = &phwi_context->be_cq[i];
2910  mem = &cq->dma_mem;
2911  if (mem->va)
2912  pci_free_consistent(phba->pcidev, num_cq_pages
2913  * PAGE_SIZE,
2914  mem->va, mem->dma);
2915  }
2916  return ret;
2917 
2918 }
2919 
2920 static int
2921 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2922  struct hwi_context_memory *phwi_context,
2923  struct hwi_controller *phwi_ctrlr,
2924  unsigned int def_pdu_ring_sz)
2925 {
2926  unsigned int idx;
2927  int ret;
2928  struct be_queue_info *dq, *cq;
2929  struct be_dma_mem *mem;
2930  struct be_mem_descriptor *mem_descr;
2931  void *dq_vaddress;
2932 
2933  idx = 0;
2934  dq = &phwi_context->be_def_hdrq;
2935  cq = &phwi_context->be_cq[0];
2936  mem = &dq->dma_mem;
2937  mem_descr = phba->init_mem;
2938  mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2939  dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2940  ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2941  sizeof(struct phys_addr),
2942  sizeof(struct phys_addr), dq_vaddress);
2943  if (ret) {
2944  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2945  "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
2946  return ret;
2947  }
2948  mem->dma = (unsigned long)mem_descr->mem_array[idx].
2949  bus_address.u.a64.address;
2950  ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2951  def_pdu_ring_sz,
2952  phba->params.defpdu_hdr_sz);
2953  if (ret) {
2954  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2955  "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2956  return ret;
2957  }
2958  phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2960  "BM_%d : iscsi def pdu id is %d\n",
2961  phwi_context->be_def_hdrq.id);
2962 
2963  hwi_post_async_buffers(phba, 1);
2964  return 0;
2965 }
2966 
2967 static int
2968 beiscsi_create_def_data(struct beiscsi_hba *phba,
2969  struct hwi_context_memory *phwi_context,
2970  struct hwi_controller *phwi_ctrlr,
2971  unsigned int def_pdu_ring_sz)
2972 {
2973  unsigned int idx;
2974  int ret;
2975  struct be_queue_info *dataq, *cq;
2976  struct be_dma_mem *mem;
2977  struct be_mem_descriptor *mem_descr;
2978  void *dq_vaddress;
2979 
2980  idx = 0;
2981  dataq = &phwi_context->be_def_dataq;
2982  cq = &phwi_context->be_cq[0];
2983  mem = &dataq->dma_mem;
2984  mem_descr = phba->init_mem;
2985  mem_descr += HWI_MEM_ASYNC_DATA_RING;
2986  dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2987  ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2988  sizeof(struct phys_addr),
2989  sizeof(struct phys_addr), dq_vaddress);
2990  if (ret) {
2991  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2992  "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
2993  return ret;
2994  }
2995  mem->dma = (unsigned long)mem_descr->mem_array[idx].
2996  bus_address.u.a64.address;
2997  ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2998  def_pdu_ring_sz,
2999  phba->params.defpdu_data_sz);
3000  if (ret) {
3001  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3002  "BM_%d : be_cmd_create_default_pdu_queue"
3003  " Failed for DEF PDU DATA\n");
3004  return ret;
3005  }
3006  phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
3008  "BM_%d : iscsi def data id is %d\n",
3009  phwi_context->be_def_dataq.id);
3010 
3011  hwi_post_async_buffers(phba, 0);
3013  "BM_%d : DEFAULT PDU DATA RING CREATED\n");
3014 
3015  return 0;
3016 }
3017 
3018 static int
3019 beiscsi_post_pages(struct beiscsi_hba *phba)
3020 {
3021  struct be_mem_descriptor *mem_descr;
3022  struct mem_array *pm_arr;
3023  unsigned int page_offset, i;
3024  struct be_dma_mem sgl;
3025  int status;
3026 
3027  mem_descr = phba->init_mem;
3028  mem_descr += HWI_MEM_SGE;
3029  pm_arr = mem_descr->mem_array;
3030 
3031  page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3032  phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
3033  for (i = 0; i < mem_descr->num_elements; i++) {
3034  hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3035  status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3036  page_offset,
3037  (pm_arr->size / PAGE_SIZE));
3038  page_offset += pm_arr->size / PAGE_SIZE;
3039  if (status != 0) {
3041  "BM_%d : post sgl failed.\n");
3042  return status;
3043  }
3044  pm_arr++;
3045  }
3047  "BM_%d : POSTED PAGES\n");
3048  return 0;
3049 }
3050 
3051 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3052 {
3053  struct be_dma_mem *mem = &q->dma_mem;
3054  if (mem->va) {
3055  pci_free_consistent(phba->pcidev, mem->size,
3056  mem->va, mem->dma);
3057  mem->va = NULL;
3058  }
3059 }
3060 
3061 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3062  u16 len, u16 entry_size)
3063 {
3064  struct be_dma_mem *mem = &q->dma_mem;
3065 
3066  memset(q, 0, sizeof(*q));
3067  q->len = len;
3068  q->entry_size = entry_size;
3069  mem->size = len * entry_size;
3070  mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3071  if (!mem->va)
3072  return -ENOMEM;
3073  memset(mem->va, 0, mem->size);
3074  return 0;
3075 }
3076 
3077 static int
3078 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3079  struct hwi_context_memory *phwi_context,
3080  struct hwi_controller *phwi_ctrlr)
3081 {
3082  unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3083  u64 pa_addr_lo;
3084  unsigned int idx, num, i;
3085  struct mem_array *pwrb_arr;
3086  void *wrb_vaddr;
3087  struct be_dma_mem sgl;
3088  struct be_mem_descriptor *mem_descr;
3089  int status;
3090 
3091  idx = 0;
3092  mem_descr = phba->init_mem;
3093  mem_descr += HWI_MEM_WRB;
3094  pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3095  GFP_KERNEL);
3096  if (!pwrb_arr) {
3097  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3098  "BM_%d : Memory alloc failed in create wrb ring.\n");
3099  return -ENOMEM;
3100  }
3101  wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3102  pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3103  num_wrb_rings = mem_descr->mem_array[idx].size /
3104  (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3105 
3106  for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3107  if (num_wrb_rings) {
3108  pwrb_arr[num].virtual_address = wrb_vaddr;
3109  pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3110  pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3111  sizeof(struct iscsi_wrb);
3112  wrb_vaddr += pwrb_arr[num].size;
3113  pa_addr_lo += pwrb_arr[num].size;
3114  num_wrb_rings--;
3115  } else {
3116  idx++;
3117  wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3118  pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3119  num_wrb_rings = mem_descr->mem_array[idx].size /
3120  (phba->params.wrbs_per_cxn *
3121  sizeof(struct iscsi_wrb));
3122  pwrb_arr[num].virtual_address = wrb_vaddr;
3123  pwrb_arr[num].bus_address.u.a64.address =
3124  pa_addr_lo;
3125  pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3126  sizeof(struct iscsi_wrb);
3127  wrb_vaddr += pwrb_arr[num].size;
3128  pa_addr_lo += pwrb_arr[num].size;
3129  num_wrb_rings--;
3130  }
3131  }
3132  for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3133  wrb_mem_index = 0;
3134  offset = 0;
3135  size = 0;
3136 
3137  hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3138  status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3139  &phwi_context->be_wrbq[i]);
3140  if (status != 0) {
3141  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3142  "BM_%d : wrbq create failed.\n");
3143  kfree(pwrb_arr);
3144  return status;
3145  }
3146  phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3147  id;
3148  }
3149  kfree(pwrb_arr);
3150  return 0;
3151 }
3152 
3153 static void free_wrb_handles(struct beiscsi_hba *phba)
3154 {
3155  unsigned int index;
3156  struct hwi_controller *phwi_ctrlr;
3157  struct hwi_wrb_context *pwrb_context;
3158 
3159  phwi_ctrlr = phba->phwi_ctrlr;
3160  for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3161  pwrb_context = &phwi_ctrlr->wrb_context[index];
3162  kfree(pwrb_context->pwrb_handle_base);
3163  kfree(pwrb_context->pwrb_handle_basestd);
3164  }
3165 }
3166 
3167 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3168 {
3169  struct be_queue_info *q;
3170  struct be_ctrl_info *ctrl = &phba->ctrl;
3171 
3172  q = &phba->ctrl.mcc_obj.q;
3173  if (q->created)
3174  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3175  be_queue_free(phba, q);
3176 
3177  q = &phba->ctrl.mcc_obj.cq;
3178  if (q->created)
3179  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3180  be_queue_free(phba, q);
3181 }
3182 
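/*
 * Tear down the hardware queues in reverse order of creation: WRB
 * queues, default PDU header/data queues, the posted SGL area, CQs,
 * EQs and finally the MCC queues.
 */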
3183 static void hwi_cleanup(struct beiscsi_hba *phba)
3184 {
3185  struct be_queue_info *q;
3186  struct be_ctrl_info *ctrl = &phba->ctrl;
3187  struct hwi_controller *phwi_ctrlr;
3188  struct hwi_context_memory *phwi_context;
3189  int i, eq_num;
3190 
3191  phwi_ctrlr = phba->phwi_ctrlr;
3192  phwi_context = phwi_ctrlr->phwi_ctxt;
3193  for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3194  q = &phwi_context->be_wrbq[i];
3195  if (q->created)
3196  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3197  }
3198  free_wrb_handles(phba);
3199 
3200  q = &phwi_context->be_def_hdrq;
3201  if (q->created)
3202  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3203 
3204  q = &phwi_context->be_def_dataq;
3205  if (q->created)
3206  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3207 
3208  beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3209 
3210  for (i = 0; i < (phba->num_cpus); i++) {
3211  q = &phwi_context->be_cq[i];
3212  if (q->created)
3213  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3214  }
3215  if (phba->msix_enabled)
3216  eq_num = 1;
3217  else
3218  eq_num = 0;
3219  for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3220  q = &phwi_context->be_eq[i].q;
3221  if (q->created)
3222  beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3223  }
3224  be_mcc_queues_destroy(phba);
3225 }
3226 
3227 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3228  struct hwi_context_memory *phwi_context)
3229 {
3230  struct be_queue_info *q, *cq;
3231  struct be_ctrl_info *ctrl = &phba->ctrl;
3232 
3233  /* Alloc MCC compl queue */
3234  cq = &phba->ctrl.mcc_obj.cq;
3235  if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3236  sizeof(struct be_mcc_compl)))
3237  goto err;
3238  /* Ask BE to create MCC compl queue; */
3239  if (phba->msix_enabled) {
3240  if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3241  [phba->num_cpus].q, false, true, 0))
3242  goto mcc_cq_free;
3243  } else {
3244  if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3245  false, true, 0))
3246  goto mcc_cq_free;
3247  }
3248 
3249  /* Alloc MCC queue */
3250  q = &phba->ctrl.mcc_obj.q;
3251  if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3252  goto mcc_cq_destroy;
3253 
3254  /* Ask BE to create MCC queue */
3255  if (beiscsi_cmd_mccq_create(phba, q, cq))
3256  goto mcc_q_free;
3257 
3258  return 0;
3259 
3260 mcc_q_free:
3261  be_queue_free(phba, q);
3262 mcc_cq_destroy:
3263  beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3264 mcc_cq_free:
3265  be_queue_free(phba, cq);
3266 err:
3267  return -ENOMEM;
3268 }
3269 
3270 static int find_num_cpus(void)
3271 {
3272  int num_cpus = 0;
3273 
3274  num_cpus = num_online_cpus();
3275  if (num_cpus >= MAX_CPUS)
3276  num_cpus = MAX_CPUS - 1;
3277 
3278  return num_cpus;
3279 }
3280 
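/*
 * Port bring-up sequence: EQs, MCC queues, firmware version check,
 * CQs, default PDU header and data rings, SGL page posting and WRB
 * rings; any failure unwinds through hwi_cleanup().
 */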
3281 static int hwi_init_port(struct beiscsi_hba *phba)
3282 {
3283  struct hwi_controller *phwi_ctrlr;
3284  struct hwi_context_memory *phwi_context;
3285  unsigned int def_pdu_ring_sz;
3286  struct be_ctrl_info *ctrl = &phba->ctrl;
3287  int status;
3288 
3289  def_pdu_ring_sz =
3290  phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3291  phwi_ctrlr = phba->phwi_ctrlr;
3292  phwi_context = phwi_ctrlr->phwi_ctxt;
3293  phwi_context->max_eqd = 0;
3294  phwi_context->min_eqd = 0;
3295  phwi_context->cur_eqd = 64;
3296  be_cmd_fw_initialize(&phba->ctrl);
3297 
3298  status = beiscsi_create_eqs(phba, phwi_context);
3299  if (status != 0) {
3300  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3301  "BM_%d : EQ not created\n");
3302  goto error;
3303  }
3304 
3305  status = be_mcc_queues_create(phba, phwi_context);
3306  if (status != 0)
3307  goto error;
3308 
3309  status = mgmt_check_supported_fw(ctrl, phba);
3310  if (status != 0) {
3311  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3312  "BM_%d : Unsupported fw version\n");
3313  goto error;
3314  }
3315 
3316  status = beiscsi_create_cqs(phba, phwi_context);
3317  if (status != 0) {
3318  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3319  "BM_%d : CQ not created\n");
3320  goto error;
3321  }
3322 
3323  status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3324  def_pdu_ring_sz);
3325  if (status != 0) {
3326  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3327  "BM_%d : Default Header not created\n");
3328  goto error;
3329  }
3330 
3331  status = beiscsi_create_def_data(phba, phwi_context,
3332  phwi_ctrlr, def_pdu_ring_sz);
3333  if (status != 0) {
3334  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3335  "BM_%d : Default Data not created\n");
3336  goto error;
3337  }
3338 
3339  status = beiscsi_post_pages(phba);
3340  if (status != 0) {
3341  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3342  "BM_%d : Post SGL Pages Failed\n");
3343  goto error;
3344  }
3345 
3346  status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3347  if (status != 0) {
3348  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3349  "BM_%d : WRB Rings not created\n");
3350  goto error;
3351  }
3352 
3354  "BM_%d : hwi_init_port success\n");
3355  return 0;
3356 
3357 error:
3359  "BM_%d : hwi_init_port failed");
3360  hwi_cleanup(phba);
3361  return status;
3362 }
3363 
3364 static int hwi_init_controller(struct beiscsi_hba *phba)
3365 {
3366  struct hwi_controller *phwi_ctrlr;
3367 
3368  phwi_ctrlr = phba->phwi_ctrlr;
3369  if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3370  phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3371  init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3373  "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3374  phwi_ctrlr->phwi_ctxt);
3375  } else {
3376  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3377  "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3378  "than one element. Failing to load\n");
3379  return -ENOMEM;
3380  }
3381 
3382  iscsi_init_global_templates(phba);
3383  if (beiscsi_init_wrb_handle(phba))
3384  return -ENOMEM;
3385 
3386  hwi_init_async_pdu_ctx(phba);
3387  if (hwi_init_port(phba) != 0) {
3388  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3389  "BM_%d : hwi_init_controller failed\n");
3390 
3391  return -ENOMEM;
3392  }
3393  return 0;
3394 }
3395 
3396 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3397 {
3398  struct be_mem_descriptor *mem_descr;
3399  int i, j;
3400 
3401  mem_descr = phba->init_mem;
3402  i = 0;
3403  j = 0;
3404  for (i = 0; i < SE_MEM_MAX; i++) {
3405  for (j = mem_descr->num_elements; j > 0; j--) {
3406  pci_free_consistent(phba->pcidev,
3407  mem_descr->mem_array[j - 1].size,
3408  mem_descr->mem_array[j - 1].virtual_address,
3409  (unsigned long)mem_descr->mem_array[j - 1].
3410  bus_address.u.a64.address);
3411  }
3412  kfree(mem_descr->mem_array);
3413  mem_descr++;
3414  }
3415  kfree(phba->init_mem);
3416  kfree(phba->phwi_ctrlr);
3417 }
3418 
3419 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3420 {
3421  int ret = -ENOMEM;
3422 
3423  ret = beiscsi_get_memory(phba);
3424  if (ret < 0) {
3425  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3426  "BM_%d : beiscsi_dev_probe - "
3427  "Failed in beiscsi_alloc_memory\n");
3428  return ret;
3429  }
3430 
3431  ret = hwi_init_controller(phba);
3432  if (ret)
3433  goto free_init;
3435  "BM_%d : Return success from beiscsi_init_controller");
3436 
3437  return 0;
3438 
3439 free_init:
3440  beiscsi_free_mem(phba);
3441  return ret;
3442 }
3443 
3444 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3445 {
3446  struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3447  struct sgl_handle *psgl_handle;
3448  struct iscsi_sge *pfrag;
3449  unsigned int arr_index, i, idx;
3450 
3451  phba->io_sgl_hndl_avbl = 0;
3452  phba->eh_sgl_hndl_avbl = 0;
3453 
3454  mem_descr_sglh = phba->init_mem;
3455  mem_descr_sglh += HWI_MEM_SGLH;
3456  if (1 == mem_descr_sglh->num_elements) {
3457  phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3458  phba->params.ios_per_ctrl,
3459  GFP_KERNEL);
3460  if (!phba->io_sgl_hndl_base) {
3461  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3462  "BM_%d : Mem Alloc Failed. Failing to load\n");
3463  return -ENOMEM;
3464  }
3465  phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3466  (phba->params.icds_per_ctrl -
3467  phba->params.ios_per_ctrl),
3468  GFP_KERNEL);
3469  if (!phba->eh_sgl_hndl_base) {
3470  kfree(phba->io_sgl_hndl_base);
3471  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3472  "BM_%d : Mem Alloc Failed. Failing to load\n");
3473  return -ENOMEM;
3474  }
3475  } else {
3476  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3477  "BM_%d : HWI_MEM_SGLH is more than one element. "
3478  "Failing to load\n");
3479  return -ENOMEM;
3480  }
3481 
3482  arr_index = 0;
3483  idx = 0;
3484  while (idx < mem_descr_sglh->num_elements) {
3485  psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3486 
3487  for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3488  sizeof(struct sgl_handle)); i++) {
3489  if (arr_index < phba->params.ios_per_ctrl) {
3490  phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3491  phba->io_sgl_hndl_avbl++;
3492  arr_index++;
3493  } else {
3494  phba->eh_sgl_hndl_base[arr_index -
3495  phba->params.ios_per_ctrl] =
3496  psgl_handle;
3497  arr_index++;
3498  phba->eh_sgl_hndl_avbl++;
3499  }
3500  psgl_handle++;
3501  }
3502  idx++;
3503  }
3505  "BM_%d : phba->io_sgl_hndl_avbl=%d"
3506  "phba->eh_sgl_hndl_avbl=%d\n",
3507  phba->io_sgl_hndl_avbl,
3508  phba->eh_sgl_hndl_avbl);
3509 
3510  mem_descr_sg = phba->init_mem;
3511  mem_descr_sg += HWI_MEM_SGE;
3513  "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3514  mem_descr_sg->num_elements);
3515 
3516  arr_index = 0;
3517  idx = 0;
3518  while (idx < mem_descr_sg->num_elements) {
3519  pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3520 
3521  for (i = 0;
3522  i < (mem_descr_sg->mem_array[idx].size) /
3523  (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3524  i++) {
3525  if (arr_index < phba->params.ios_per_ctrl)
3526  psgl_handle = phba->io_sgl_hndl_base[arr_index];
3527  else
3528  psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3529  phba->params.ios_per_ctrl];
3530  psgl_handle->pfrag = pfrag;
3531  AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3532  AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3533  pfrag += phba->params.num_sge_per_io;
3534  psgl_handle->sgl_index =
3535  phba->fw_config.iscsi_icd_start + arr_index++;
3536  }
3537  idx++;
3538  }
3539  phba->io_sgl_free_index = 0;
3540  phba->io_sgl_alloc_index = 0;
3541  phba->eh_sgl_free_index = 0;
3542  phba->eh_sgl_alloc_index = 0;
3543  return 0;
3544 }
3545 
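/*
 * Build the free-CID array (CIDs stepped by two from the firmware's
 * iscsi_cid_start) and the endpoint lookup table used on the
 * completion path.
 */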
3546 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3547 {
3548  int i, new_cid;
3549 
3550  phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3551  GFP_KERNEL);
3552  if (!phba->cid_array) {
3553  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3554  "BM_%d : Failed to allocate memory in "
3555  "hba_setup_cid_tbls\n");
3556  return -ENOMEM;
3557  }
3558  phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3559  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3560  if (!phba->ep_array) {
3561  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3562  "BM_%d : Failed to allocate memory in "
3563  "hba_setup_cid_tbls\n");
3564  kfree(phba->cid_array);
3565  return -ENOMEM;
3566  }
3567  new_cid = phba->fw_config.iscsi_cid_start;
3568  for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3569  phba->cid_array[i] = new_cid;
3570  new_cid += 2;
3571  }
3572  phba->avlbl_cids = phba->params.cxns_per_ctrl;
3573  return 0;
3574 }
3575 
3576 static void hwi_enable_intr(struct beiscsi_hba *phba)
3577 {
3578  struct be_ctrl_info *ctrl = &phba->ctrl;
3579  struct hwi_controller *phwi_ctrlr;
3580  struct hwi_context_memory *phwi_context;
3581  struct be_queue_info *eq;
3582  u8 __iomem *addr;
3583  u32 reg, i;
3584  u32 enabled;
3585 
3586  phwi_ctrlr = phba->phwi_ctrlr;
3587  phwi_context = phwi_ctrlr->phwi_ctxt;
3588 
3589  addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3590  PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3591  reg = ioread32(addr);
3592 
3593  enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3594  if (!enabled) {
3595  reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3596  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3597  "BM_%d : reg = 0x%08x addr = %p\n", reg, addr);
3598  iowrite32(reg, addr);
3599  }
3600 
3601  if (!phba->msix_enabled) {
3602  eq = &phwi_context->be_eq[0].q;
3603  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3604  "BM_%d : eq->id=%d\n", eq->id);
3605 
3606  hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3607  } else {
3608  for (i = 0; i <= phba->num_cpus; i++) {
3609  eq = &phwi_context->be_eq[i].q;
3610  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3611  "BM_%d : eq->id=%d\n", eq->id);
3612  hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3613  }
3614  }
3615 }
3616 
3617 static void hwi_disable_intr(struct beiscsi_hba *phba)
3618 {
3619  struct be_ctrl_info *ctrl = &phba->ctrl;
3620 
3622  u32 reg = ioread32(addr);
3623 
3624  u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3625  if (enabled) {
3626  reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3627  iowrite32(reg, addr);
3628  } else
3629  beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3630  "BM_%d : In hwi_disable_intr, Already Disabled\n");
3631 }
3632 
3633 /**
3634  * beiscsi_get_boot_info()- Get the boot session info
3635  * @phba: The device priv structure instance
3636  *
3637  * Get the boot target info and store in driver priv structure
3638  *
3639  * return values
3640  *	Success: 0
3641  *	Failure: Non-Zero Value
3642  **/
3643 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3644 {
3645  struct be_cmd_get_session_resp *session_resp;
3646  struct be_mcc_wrb *wrb;
3647  struct be_dma_mem nonemb_cmd;
3648  unsigned int tag, wrb_num;
3649  unsigned short status, extd_status;
3650  unsigned int s_handle;
3651  struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3652  int ret = -ENOMEM;
3653 
3654  /* Get the session handle of the boot target */
3655  ret = be_mgmt_get_boot_shandle(phba, &s_handle);
3656  if (ret) {
3657  beiscsi_log(phba, KERN_ERR,
3658  BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3659  "BM_%d : No boot session\n");
3660  return ret;
3661  }
3662  nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3663  sizeof(*session_resp),
3664  &nonemb_cmd.dma);
3665  if (nonemb_cmd.va == NULL) {
3666  beiscsi_log(phba, KERN_ERR,
3667  BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3668  "BM_%d : Failed to allocate memory for "
3669  "beiscsi_get_session_info\n");
3670 
3671  return -ENOMEM;
3672  }
3673 
3674  memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3675  tag = mgmt_get_session_info(phba, s_handle,
3676  &nonemb_cmd);
3677  if (!tag) {
3678  beiscsi_log(phba, KERN_ERR,
3679  BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3680  "BM_%d : beiscsi_get_session_info"
3681  " Failed\n");
3682 
3683  goto boot_freemem;
3684  } else
3685  wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3686  phba->ctrl.mcc_numtag[tag]);
3687 
3688  wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3689  extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3690  status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3691  if (status || extd_status) {
3692  beiscsi_log(phba, KERN_ERR,
3693  BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3694  "BM_%d : beiscsi_get_session_info Failed"
3695  " status = %d extd_status = %d\n",
3696  status, extd_status);
3697 
3698  free_mcc_tag(&phba->ctrl, tag);
3699  goto boot_freemem;
3700  }
3701  wrb = queue_get_wrb(mccq, wrb_num);
3702  free_mcc_tag(&phba->ctrl, tag);
3703  session_resp = nonemb_cmd.va;
3704 
3705  memcpy(&phba->boot_sess, &session_resp->session_info,
3706  sizeof(struct mgmt_session_info));
3707  ret = 0;
3708 
3709 boot_freemem:
3710  pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3711  nonemb_cmd.va, nonemb_cmd.dma);
3712  return ret;
3713 }
3714 
3715 static void beiscsi_boot_release(void *data)
3716 {
3717  struct beiscsi_hba *phba = data;
3718 
3719  scsi_host_put(phba->shost);
3720 }
3721 
3722 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3723 {
3724  struct iscsi_boot_kobj *boot_kobj;
3725 
3726  /* get boot info using mgmt cmd */
3727  if (beiscsi_get_boot_info(phba))
3728  /* Try to see if we can carry on without this */
3729  return 0;
3730 
3731  phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3732  if (!phba->boot_kset)
3733  return -ENOMEM;
3734 
3735  /* get a ref because the show function will ref the phba */
3736  if (!scsi_host_get(phba->shost))
3737  goto free_kset;
3738  boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3739  beiscsi_show_boot_tgt_info,
3740  beiscsi_tgt_get_attr_visibility,
3741  beiscsi_boot_release);
3742  if (!boot_kobj)
3743  goto put_shost;
3744 
3745  if (!scsi_host_get(phba->shost))
3746  goto free_kset;
3747  boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3748  beiscsi_show_boot_ini_info,
3749  beiscsi_ini_get_attr_visibility,
3750  beiscsi_boot_release);
3751  if (!boot_kobj)
3752  goto put_shost;
3753 
3754  if (!scsi_host_get(phba->shost))
3755  goto free_kset;
3756  boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3757  beiscsi_show_boot_eth_info,
3758  beiscsi_eth_get_attr_visibility,
3759  beiscsi_boot_release);
3760  if (!boot_kobj)
3761  goto put_shost;
3762  return 0;
3763 
3764 put_shost:
3765  scsi_host_put(phba->shost);
3766 free_kset:
3767  iscsi_boot_destroy_kset(phba->boot_kset);
3768  return -ENOMEM;
3769 }
3770 
3771 static int beiscsi_init_port(struct beiscsi_hba *phba)
3772 {
3773  int ret;
3774 
3775  ret = beiscsi_init_controller(phba);
3776  if (ret < 0) {
3777  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3778  "BM_%d : beiscsi_dev_probe - Failed in "
3779  "beiscsi_init_controller\n");
3780  return ret;
3781  }
3782  ret = beiscsi_init_sgl_handle(phba);
3783  if (ret < 0) {
3784  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3785  "BM_%d : beiscsi_dev_probe - Failed in "
3786  "beiscsi_init_sgl_handle\n");
3787  goto do_cleanup_ctrlr;
3788  }
3789 
3790  if (hba_setup_cid_tbls(phba)) {
3791  beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3792  "BM_%d : Failed in hba_setup_cid_tbls\n");
3793  kfree(phba->io_sgl_hndl_base);
3794  kfree(phba->eh_sgl_hndl_base);
3795  goto do_cleanup_ctrlr;
3796  }
3797 
3798  return ret;
3799 
3800 do_cleanup_ctrlr:
3801  hwi_cleanup(phba);
3802  return ret;
3803 }
3804 
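/*
 * Consume and invalidate any event queue entries still pending, ringing
 * the EQ doorbell so the hardware and driver indices stay in sync.
 */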
3805 static void hwi_purge_eq(struct beiscsi_hba *phba)
3806 {
3807  struct hwi_controller *phwi_ctrlr;
3808  struct hwi_context_memory *phwi_context;
3809  struct be_queue_info *eq;
3810  struct be_eq_entry *eqe = NULL;
3811  int i, eq_msix;
3812  unsigned int num_processed;
3813 
3814  phwi_ctrlr = phba->phwi_ctrlr;
3815  phwi_context = phwi_ctrlr->phwi_ctxt;
3816  if (phba->msix_enabled)
3817  eq_msix = 1;
3818  else
3819  eq_msix = 0;
3820 
3821  for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3822  eq = &phwi_context->be_eq[i].q;
3823  eqe = queue_tail_node(eq);
3824  num_processed = 0;
3825  while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3826  & EQE_VALID_MASK) {
3827  AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3828  queue_tail_inc(eq);
3829  eqe = queue_tail_node(eq);
3830  num_processed++;
3831  }
3832 
3833  if (num_processed)
3834  hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3835  }
3836 }
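
hwi_purge_eq() drains each event queue by following the valid bit: the hardware marks an entry valid when it posts it, the driver clears the bit as it consumes each entry, stops at the first invalid one, and then acknowledges the whole batch through the doorbell. A standalone sketch of that consume-while-valid loop (a plain C model with no hardware specifics):

    #include <stdio.h>

    #define QLEN 8

    /* Hypothetical EQ entry: just the valid bit the hardware would set. */
    struct eqe { unsigned valid; };

    int main(void)
    {
        struct eqe ring[QLEN] = {0};
        unsigned tail = 0, num_processed = 0;

        /* Pretend hardware posted three events. */
        ring[0].valid = ring[1].valid = ring[2].valid = 1;

        /* Same loop shape as hwi_purge_eq(): consume while valid. */
        while (ring[tail].valid) {
            ring[tail].valid = 0;            /* clear so it isn't re-read */
            tail = (tail + 1) % QLEN;        /* queue_tail_inc() analogue */
            num_processed++;
        }

        /* hwi_ring_eq_db() would be rung here with num_processed. */
        printf("processed %u entries\n", num_processed);
        return 0;
    }
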
3837 
3838 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3839 {
3840  int mgmt_status;
3841 
3842  mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3843  if (mgmt_status)
3845  "BM_%d : mgmt_epfw_cleanup FAILED\n");
3846 
3847  hwi_purge_eq(phba);
3848  hwi_cleanup(phba);
3849  kfree(phba->io_sgl_hndl_base);
3850  kfree(phba->eh_sgl_hndl_base);
3851  kfree(phba->cid_array);
3852  kfree(phba->ep_array);
3853 }
3854 
3855 static void beiscsi_cleanup_task(struct iscsi_task *task)
3856 {
3857  struct beiscsi_io_task *io_task = task->dd_data;
3858  struct iscsi_conn *conn = task->conn;
3859  struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3860  struct beiscsi_hba *phba = beiscsi_conn->phba;
3861  struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3862  struct hwi_wrb_context *pwrb_context;
3863  struct hwi_controller *phwi_ctrlr;
3864 
3865  phwi_ctrlr = phba->phwi_ctrlr;
3866  pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3867  - phba->fw_config.iscsi_cid_start];
3868 
3869  if (io_task->cmd_bhs) {
3870  pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3871  io_task->bhs_pa.u.a64.address);
3872  io_task->cmd_bhs = NULL;
3873  }
3874 
3875  if (task->sc) {
3876  if (io_task->pwrb_handle) {
3877  free_wrb_handle(phba, pwrb_context,
3878  io_task->pwrb_handle);
3879  io_task->pwrb_handle = NULL;
3880  }
3881 
3882  if (io_task->psgl_handle) {
3883  spin_lock(&phba->io_sgl_lock);
3884  free_io_sgl_handle(phba, io_task->psgl_handle);
3885  spin_unlock(&phba->io_sgl_lock);
3886  io_task->psgl_handle = NULL;
3887  }
3888  } else {
3889  if (!beiscsi_conn->login_in_progress) {
3890  if (io_task->pwrb_handle) {
3891  free_wrb_handle(phba, pwrb_context,
3892  io_task->pwrb_handle);
3893  io_task->pwrb_handle = NULL;
3894  }
3895  if (io_task->psgl_handle) {
3896  spin_lock(&phba->mgmt_sgl_lock);
3897  free_mgmt_sgl_handle(phba,
3898  io_task->psgl_handle);
3899  spin_unlock(&phba->mgmt_sgl_lock);
3900  io_task->psgl_handle = NULL;
3901  }
3902  }
3903  }
3904 }
3905 
3906 void
3907 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3908  struct beiscsi_offload_params *params)
3909 {
3910  struct wrb_handle *pwrb_handle;
3911  struct iscsi_target_context_update_wrb *pwrb = NULL;
3912  struct be_mem_descriptor *mem_descr;
3913  struct beiscsi_hba *phba = beiscsi_conn->phba;
3914  struct iscsi_task *task = beiscsi_conn->task;
3915  struct iscsi_session *session = task->conn->session;
3916  u32 doorbell = 0;
3917 
3918  /*
3919  * We can always use 0 here because it is reserved by libiscsi for
3920  * login/startup related tasks.
3921  */
3922  beiscsi_conn->login_in_progress = 0;
3923  spin_lock_bh(&session->lock);
3924  beiscsi_cleanup_task(task);
3925  spin_unlock_bh(&session->lock);
3926 
3927  pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3928  phba->fw_config.iscsi_cid_start));
3929  pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3930  memset(pwrb, 0, sizeof(*pwrb));
3931  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3932  max_burst_length, pwrb, params->dw[offsetof
3933  (struct amap_beiscsi_offload_params,
3934  max_burst_length) / 32]);
3935  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3936  max_send_data_segment_length, pwrb,
3937  params->dw[offsetof(struct amap_beiscsi_offload_params,
3938  max_send_data_segment_length) / 32]);
3939  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3940  first_burst_length,
3941  pwrb,
3942  params->dw[offsetof(struct amap_beiscsi_offload_params,
3943  first_burst_length) / 32]);
3944 
3945  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3946  (params->dw[offsetof(struct amap_beiscsi_offload_params,
3947  erl) / 32] & OFFLD_PARAMS_ERL));
3948  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3949  (params->dw[offsetof(struct amap_beiscsi_offload_params,
3950  dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3951  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3952  (params->dw[offsetof(struct amap_beiscsi_offload_params,
3953  hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3954  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3955  (params->dw[offsetof(struct amap_beiscsi_offload_params,
3956  ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3957  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3958  (params->dw[offsetof(struct amap_beiscsi_offload_params,
3959  imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3960  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, exp_statsn,
3961  pwrb,
3962  (params->dw[offsetof(struct amap_beiscsi_offload_params,
3963  exp_statsn) / 32] + 1));
3964  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3965  0x7);
3966  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3967  pwrb, pwrb_handle->wrb_index);
3968  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, max_wrb_idx,
3969  pwrb, pwrb_handle->nxt_wrb_index);
3970  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3971  session_state, pwrb, 0);
3972  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3973  pwrb, 1);
3974  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3975  pwrb, 0);
3976  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3977  0);
3978 
3979  mem_descr = phba->init_mem;
3980  mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3981 
3982  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3983  pad_buffer_addr_hi, pwrb,
3984  mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3985  AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3986  pad_buffer_addr_lo, pwrb,
3987  mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3988 
3989  be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3990 
3991  doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3992  doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3993  << DB_DEF_PDU_WRB_INDEX_SHIFT;
3994  doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3995 
3996  iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3997 }
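
The doorbell word written at the end of beiscsi_offload_connection() (and again in beiscsi_iotask() and beiscsi_mtask() below) packs three fields into a single 32-bit register write. A compilable sketch of the packing, assuming the be_main.h values of these macros (CID in bits 0-15, WRB index in bits 16-23, number-posted count from bit 24 up; verify against the header):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values for the be_main.h doorbell macros (check the header). */
    #define DB_WRB_POST_CID_MASK        0xFFFF  /* CID in the low 16 bits */
    #define DB_DEF_PDU_WRB_INDEX_MASK   0xFF    /* 8-bit WRB index        */
    #define DB_DEF_PDU_WRB_INDEX_SHIFT  16
    #define DB_DEF_PDU_NUM_POSTED_SHIFT 24

    int main(void)
    {
        uint32_t cid = 0x0042, wrb_index = 0x17;
        uint32_t doorbell = 0;

        doorbell |= cid & DB_WRB_POST_CID_MASK;
        doorbell |= (wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
                     << DB_DEF_PDU_WRB_INDEX_SHIFT;
        doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;   /* one WRB posted */

        /* The driver would iowrite32() this to db_va + DB_TXULP0_OFFSET. */
        printf("doorbell = 0x%08x\n", doorbell);        /* 0x01170042 */
        return 0;
    }
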
3998 
3999 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4000  int *index, int *age)
4001 {
4002  *index = (int)itt;
4003  if (age)
4004  *age = conn->session->age;
4005 }
4006 
4007 /**
4008  * beiscsi_alloc_pdu - allocates pdu and related resources
4009  * @task: libiscsi task
4010  * @opcode: opcode of pdu for task
4011  *
4012  * This is called with the session lock held. It will allocate
4013  * the wrb and sgl if needed for the command. And it will prep
4014  * the pdu's itt. beiscsi_parse_pdu will later translate
4015  * the pdu itt to the libiscsi task itt.
4016  */
4017 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4018 {
4019  struct beiscsi_io_task *io_task = task->dd_data;
4020  struct iscsi_conn *conn = task->conn;
4021  struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4022  struct beiscsi_hba *phba = beiscsi_conn->phba;
4023  struct hwi_wrb_context *pwrb_context;
4024  struct hwi_controller *phwi_ctrlr;
4025  itt_t itt;
4026  struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4027  dma_addr_t paddr;
4028 
4029  io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
4030  GFP_ATOMIC, &paddr);
4031  if (!io_task->cmd_bhs)
4032  return -ENOMEM;
4033  io_task->bhs_pa.u.a64.address = paddr;
4034  io_task->libiscsi_itt = (itt_t)task->itt;
4035  io_task->conn = beiscsi_conn;
4036 
4037  task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4038  task->hdr_max = sizeof(struct be_cmd_bhs);
4039  io_task->psgl_handle = NULL;
4040  io_task->pwrb_handle = NULL;
4041 
4042  if (task->sc) {
4043  spin_lock(&phba->io_sgl_lock);
4044  io_task->psgl_handle = alloc_io_sgl_handle(phba);
4045  spin_unlock(&phba->io_sgl_lock);
4046  if (!io_task->psgl_handle)
4047  goto free_hndls;
4048  io_task->pwrb_handle = alloc_wrb_handle(phba,
4049  beiscsi_conn->beiscsi_conn_cid -
4050  phba->fw_config.iscsi_cid_start);
4051  if (!io_task->pwrb_handle)
4052  goto free_io_hndls;
4053  } else {
4054  io_task->scsi_cmnd = NULL;
4055  if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4056  if (!beiscsi_conn->login_in_progress) {
4057  spin_lock(&phba->mgmt_sgl_lock);
4058  io_task->psgl_handle = (struct sgl_handle *)
4059  alloc_mgmt_sgl_handle(phba);
4060  spin_unlock(&phba->mgmt_sgl_lock);
4061  if (!io_task->psgl_handle)
4062  goto free_hndls;
4063 
4064  beiscsi_conn->login_in_progress = 1;
4065  beiscsi_conn->plogin_sgl_handle =
4066  io_task->psgl_handle;
4067  io_task->pwrb_handle =
4068  alloc_wrb_handle(phba,
4069  beiscsi_conn->beiscsi_conn_cid -
4070  phba->fw_config.iscsi_cid_start);
4071  if (!io_task->pwrb_handle)
4072  goto free_io_hndls;
4073  beiscsi_conn->plogin_wrb_handle =
4074  io_task->pwrb_handle;
4075 
4076  } else {
4077  io_task->psgl_handle =
4078  beiscsi_conn->plogin_sgl_handle;
4079  io_task->pwrb_handle =
4080  beiscsi_conn->plogin_wrb_handle;
4081  }
4082  beiscsi_conn->task = task;
4083  } else {
4084  spin_lock(&phba->mgmt_sgl_lock);
4085  io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4086  spin_unlock(&phba->mgmt_sgl_lock);
4087  if (!io_task->psgl_handle)
4088  goto free_hndls;
4089  io_task->pwrb_handle =
4090  alloc_wrb_handle(phba,
4091  beiscsi_conn->beiscsi_conn_cid -
4092  phba->fw_config.iscsi_cid_start);
4093  if (!io_task->pwrb_handle)
4094  goto free_mgmt_hndls;
4095 
4096  }
4097  }
4098  itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4099  wrb_index << 16) | (unsigned int)
4100  (io_task->psgl_handle->sgl_index));
4101  io_task->pwrb_handle->pio_handle = task;
4102 
4103  io_task->cmd_bhs->iscsi_hdr.itt = itt;
4104  return 0;
4105 
4106 free_io_hndls:
4107  spin_lock(&phba->io_sgl_lock);
4108  free_io_sgl_handle(phba, io_task->psgl_handle);
4109  spin_unlock(&phba->io_sgl_lock);
4110  goto free_hndls;
4111 free_mgmt_hndls:
4112  spin_lock(&phba->mgmt_sgl_lock);
4113  free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4114  spin_unlock(&phba->mgmt_sgl_lock);
4115 free_hndls:
4116  phwi_ctrlr = phba->phwi_ctrlr;
4117  pwrb_context = &phwi_ctrlr->wrb_context[
4118  beiscsi_conn->beiscsi_conn_cid -
4119  phba->fw_config.iscsi_cid_start];
4120  if (io_task->pwrb_handle)
4121  free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4122  io_task->pwrb_handle = NULL;
4123  pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4124  io_task->bhs_pa.u.a64.address);
4125  io_task->cmd_bhs = NULL;
4126  beiscsi_log(phba, KERN_ERR,
4128  "BM_%d : Alloc of SGL_ICD Failed\n");
4129  return -ENOMEM;
4130 }
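
The itt computed near the end of beiscsi_alloc_pdu() carries both hardware indices so a completion can be routed back without a lookup table: the WRB index in the high 16 bits and the SGL/ICD index in the low 16. A small self-contained sketch of the packing and unpacking (the driver additionally byte-swaps with cpu_to_be32() before placing the value in the BHS):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pack both hardware indices into one 32-bit ITT. */
        uint32_t wrb_index = 0x0005, sgl_index = 0x00c3;
        uint32_t itt = (wrb_index << 16) | sgl_index;

        /* Unpack as a completion handler would. */
        uint32_t got_wrb = itt >> 16;
        uint32_t got_sgl = itt & 0xFFFF;
        assert(got_wrb == wrb_index && got_sgl == sgl_index);

        printf("itt = 0x%08x (wrb=%u, sgl=%u)\n", itt, got_wrb, got_sgl);
        return 0;
    }
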
4131 
4132 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4133  unsigned int num_sg, unsigned int xferlen,
4134  unsigned int writedir)
4135 {
4136 
4137  struct beiscsi_io_task *io_task = task->dd_data;
4138  struct iscsi_conn *conn = task->conn;
4139  struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4140  struct beiscsi_hba *phba = beiscsi_conn->phba;
4141  struct iscsi_wrb *pwrb = NULL;
4142  unsigned int doorbell = 0;
4143 
4144  pwrb = io_task->pwrb_handle->pwrb;
4145  io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4146  io_task->bhs_len = sizeof(struct be_cmd_bhs);
4147 
4148  if (writedir) {
4149  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4150  INI_WR_CMD);
4151  AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4152  } else {
4153  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4154  INI_RD_CMD);
4155  AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4156  }
4157 
4158  AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4159  cpu_to_be16(*(unsigned short *)
4160  &io_task->cmd_bhs->iscsi_hdr.lun));
4161  AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4162  AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4163  io_task->pwrb_handle->wrb_index);
4164  AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4165  be32_to_cpu(task->cmdsn));
4166  AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4167  io_task->psgl_handle->sgl_index);
4168 
4169  hwi_write_sgl(pwrb, sg, num_sg, io_task);
4170 
4171  AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4172  io_task->pwrb_handle->nxt_wrb_index);
4173  be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4174 
4175  doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4176  doorbell |= (io_task->pwrb_handle->wrb_index &
4177  DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4178  doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4179 
4180  iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4181  return 0;
4182 }
4183 
4184 static int beiscsi_mtask(struct iscsi_task *task)
4185 {
4186  struct beiscsi_io_task *io_task = task->dd_data;
4187  struct iscsi_conn *conn = task->conn;
4188  struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4189  struct beiscsi_hba *phba = beiscsi_conn->phba;
4190  struct iscsi_wrb *pwrb = NULL;
4191  unsigned int doorbell = 0;
4192  unsigned int cid;
4193 
4194  cid = beiscsi_conn->beiscsi_conn_cid;
4195  pwrb = io_task->pwrb_handle->pwrb;
4196  memset(pwrb, 0, sizeof(*pwrb));
4197  AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4198  be32_to_cpu(task->cmdsn));
4199  AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4200  io_task->pwrb_handle->wrb_index);
4201  AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4202  io_task->psgl_handle->sgl_index);
4203 
4204  switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4205  case ISCSI_OP_LOGIN:
4206  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4207  TGT_DM_CMD);
4208  AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4209  AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4210  hwi_write_buffer(pwrb, task);
4211  break;
4212  case ISCSI_OP_NOOP_OUT:
4213  if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4214  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4215  TGT_DM_CMD);
4216  AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4217  pwrb, 0);
4218  AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
4219  } else {
4220  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4221  INI_RD_CMD);
4222  AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4223  }
4224  hwi_write_buffer(pwrb, task);
4225  break;
4226  case ISCSI_OP_TEXT:
4227  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4228  TGT_DM_CMD);
4229  AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4230  hwi_write_buffer(pwrb, task);
4231  break;
4232  case ISCSI_OP_SCSI_TMFUNC:
4233  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4234  INI_TMF_CMD);
4235  AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4236  hwi_write_buffer(pwrb, task);
4237  break;
4238  case ISCSI_OP_LOGOUT:
4239  AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4240  AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4241  HWH_TYPE_LOGOUT);
4242  hwi_write_buffer(pwrb, task);
4243  break;
4244 
4245  default:
4247  "BM_%d : opcode =%d Not supported\n",
4248  task->hdr->opcode & ISCSI_OPCODE_MASK);
4249 
4250  return -EINVAL;
4251  }
4252 
4253  AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4254  task->data_count);
4255  AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4256  io_task->pwrb_handle->nxt_wrb_index);
4257  be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4258 
4259  doorbell |= cid & DB_WRB_POST_CID_MASK;
4260  doorbell |= (io_task->pwrb_handle->wrb_index &
4261  DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4262  doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4263  iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4264  return 0;
4265 }
4266 
4267 static int beiscsi_task_xmit(struct iscsi_task *task)
4268 {
4269  struct beiscsi_io_task *io_task = task->dd_data;
4270  struct scsi_cmnd *sc = task->sc;
4271  struct scatterlist *sg;
4272  int num_sg;
4273  unsigned int writedir = 0, xferlen = 0;
4274 
4275  if (!sc)
4276  return beiscsi_mtask(task);
4277 
4278  io_task->scsi_cmnd = sc;
4279  num_sg = scsi_dma_map(sc);
4280  if (num_sg < 0) {
4281  struct iscsi_conn *conn = task->conn;
4282  struct beiscsi_hba *phba = NULL;
4283 
4284  phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4286  "BM_%d : scsi_dma_map Failed\n");
4287 
4288  return num_sg;
4289  }
4290  xferlen = scsi_bufflen(sc);
4291  sg = scsi_sglist(sc);
4292  if (sc->sc_data_direction == DMA_TO_DEVICE)
4293  writedir = 1;
4294  else
4295  writedir = 0;
4296 
4297  return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4298 }
4299 
4300 /**
4301  * beiscsi_bsg_request - handle bsg request from ISCSI transport
4302  * @job: job to handle
4303  */
4304 static int beiscsi_bsg_request(struct bsg_job *job)
4305 {
4306  struct Scsi_Host *shost;
4307  struct beiscsi_hba *phba;
4308  struct iscsi_bsg_request *bsg_req = job->request;
4309  int rc = -EINVAL;
4310  unsigned int tag;
4311  struct be_dma_mem nonemb_cmd;
4312  struct be_cmd_resp_hdr *resp;
4313  struct iscsi_bsg_reply *bsg_reply = job->reply;
4314  unsigned short status, extd_status;
4315 
4316  shost = iscsi_job_to_shost(job);
4317  phba = iscsi_host_priv(shost);
4318 
4319  switch (bsg_req->msgcode) {
4320  case ISCSI_BSG_HST_VENDOR:
4321  nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4322  job->request_payload.payload_len,
4323  &nonemb_cmd.dma);
4324  if (nonemb_cmd.va == NULL) {
4326  "BM_%d : Failed to allocate memory for "
4327  "beiscsi_bsg_request\n");
4328  return -EIO;
4329  }
4330  tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4331  &nonemb_cmd);
4332  if (!tag) {
4334  "BM_%d : be_cmd_get_mac_addr Failed\n");
4335 
4336  pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4337  nonemb_cmd.va, nonemb_cmd.dma);
4338  return -EAGAIN;
4339  } else
4340  wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4341  phba->ctrl.mcc_numtag[tag]);
4342  extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4343  status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4344  free_mcc_tag(&phba->ctrl, tag);
4345  resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4346  sg_copy_from_buffer(job->reply_payload.sg_list,
4347  job->reply_payload.sg_cnt,
4348  nonemb_cmd.va, (resp->response_length
4349  + sizeof(*resp)));
4350  bsg_reply->reply_payload_rcv_len = resp->response_length;
4351  bsg_reply->result = status;
4352  bsg_job_done(job, bsg_reply->result,
4353  bsg_reply->reply_payload_rcv_len);
4354  pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4355  nonemb_cmd.va, nonemb_cmd.dma);
4356  if (status || extd_status) {
4358  "BM_%d : be_cmd_get_mac_addr Failed"
4359  " status = %d extd_status = %d\n",
4360  status, extd_status);
4361 
4362  return -EIO;
4363  }
4364  break;
4365 
4366  default:
4368  "BM_%d : Unsupported bsg command: 0x%x\n",
4369  bsg_req->msgcode);
4370  break;
4371  }
4372 
4373  return rc;
4374 }
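
The completion word stored in mcc_numtag[tag] is decoded here the same way as elsewhere in the driver: command status in the low byte, extended status in the byte above it. A tiny sketch of the decode with a made-up completion word:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* mcc_numtag[tag] packs the completion: status in bits 0-7,
         * extended status in bits 8-15 (layout as used above). */
        uint32_t word = 0x00002A07;   /* hypothetical completion word */

        uint16_t extd_status = (word & 0x0000FF00) >> 8;
        uint16_t status      =  word & 0x000000FF;

        /* Prints status=0x07 extd_status=0x2a. */
        printf("status=0x%02x extd_status=0x%02x\n", status, extd_status);
        return 0;
    }
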
4375 
4376 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4377 {
4378  /* Set the logging parameter */
4379  beiscsi_log_enable_init(phba, beiscsi_log_enable);
4380 }
4381 
4382 static void beiscsi_quiesce(struct beiscsi_hba *phba)
4383 {
4384  struct hwi_controller *phwi_ctrlr;
4385  struct hwi_context_memory *phwi_context;
4386  struct be_eq_obj *pbe_eq;
4387  unsigned int i, msix_vec;
4388  u8 *real_offset = 0;
4389  u32 value = 0;
4390 
4391  phwi_ctrlr = phba->phwi_ctrlr;
4392  phwi_context = phwi_ctrlr->phwi_ctxt;
4393  hwi_disable_intr(phba);
4394  if (phba->msix_enabled) {
4395  for (i = 0; i <= phba->num_cpus; i++) {
4396  msix_vec = phba->msix_entries[i].vector;
4397  free_irq(msix_vec, &phwi_context->be_eq[i]);
4398  kfree(phba->msi_name[i]);
4399  }
4400  } else
4401  if (phba->pcidev->irq)
4402  free_irq(phba->pcidev->irq, phba);
4403  pci_disable_msix(phba->pcidev);
4404  destroy_workqueue(phba->wq);
4405  if (blk_iopoll_enabled)
4406  for (i = 0; i < phba->num_cpus; i++) {
4407  pbe_eq = &phwi_context->be_eq[i];
4408  blk_iopoll_disable(&pbe_eq->iopoll);
4409  }
4410 
4411  beiscsi_clean_port(phba);
4412  beiscsi_free_mem(phba);
4413  real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4414 
4415  value = readl((void *)real_offset);
4416 
4417  if (value & 0x00010000) {
4418  value &= 0xfffeffff;
4419  writel(value, (void *)real_offset);
4420  }
4421  beiscsi_unmap_pci_function(phba);
4422  pci_free_consistent(phba->ctrl.pdev,
4423  phba->ctrl.mbox_mem_alloced.size,
4424  phba->ctrl.mbox_mem_alloced.va,
4425  phba->ctrl.mbox_mem_alloced.dma);
4426 }
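
The readl()/writel() pair at the end of beiscsi_quiesce() clears bit 16 of the MPU_EP_SEMAPHORE register, the driver-loaded flag that beiscsi_dev_probe() sets below; the mask 0xfffeffff is simply ~0x00010000. A sketch of that flag handling on a plain variable (DRIVER_LOADED_BIT is an editor-chosen name; in the driver this is an MMIO access, not a C variable):

    #include <stdint.h>
    #include <stdio.h>

    #define DRIVER_LOADED_BIT 0x00010000u  /* bit 16 of MPU_EP_SEMAPHORE */

    int main(void)
    {
        uint32_t sem = 0;                      /* stands in for readl() */

        /* Probe path: the first HBA sets the bit to mark a live driver. */
        sem |= DRIVER_LOADED_BIT;              /* writel() in the driver */

        /* Quiesce path: clear it so the next load is not treated as a
         * crashdump restart (value &= 0xfffeffff above). */
        if (sem & DRIVER_LOADED_BIT)
            sem &= ~DRIVER_LOADED_BIT;

        printf("semaphore = 0x%08x\n", sem);   /* back to 0x00000000 */
        return 0;
    }
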
4427 
4428 static void beiscsi_remove(struct pci_dev *pcidev)
4429 {
4430 
4431  struct beiscsi_hba *phba = NULL;
4432 
4433  phba = pci_get_drvdata(pcidev);
4434  if (!phba) {
4435  dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4436  return;
4437  }
4438 
4439  beiscsi_destroy_def_ifaces(phba);
4440  beiscsi_quiesce(phba);
4441  iscsi_boot_destroy_kset(phba->boot_kset);
4442  iscsi_host_remove(phba->shost);
4443  pci_dev_put(phba->pcidev);
4444  iscsi_host_free(phba->shost);
4445  pci_disable_device(pcidev);
4446 }
4447 
4448 static void beiscsi_shutdown(struct pci_dev *pcidev)
4449 {
4450 
4451  struct beiscsi_hba *phba = NULL;
4452 
4453  phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4454  if (!phba) {
4455  dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4456  return;
4457  }
4458 
4459  beiscsi_quiesce(phba);
4460  pci_disable_device(pcidev);
4461 }
4462 
4463 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4464 {
4465  int i, status;
4466 
4467  for (i = 0; i <= phba->num_cpus; i++)
4468  phba->msix_entries[i].entry = i;
4469 
4470  status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4471  (phba->num_cpus + 1));
4472  if (!status)
4473  phba->msix_enabled = true;
4474 
4475  return;
4476 }
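
beiscsi_msix_enable() requests phba->num_cpus + 1 vectors: one per CPU for the I/O event queues plus one extra (the eq_msix slot seen in hwi_purge_eq() above) for the MCC event queue. In this kernel, pci_enable_msix() returns 0 only when every requested vector is granted, so the driver treats anything else as a fallback to INTx. A user-space sketch of the all-or-nothing request (fake_enable_msix() is a stand-in, not a kernel API):

    #include <stdio.h>

    #define MAX_CPUS 8

    struct msix_entry { int entry; int vector; };

    /* Hypothetical stand-in for pci_enable_msix(): 0 on success. */
    static int fake_enable_msix(struct msix_entry *e, int nvec)
    {
        for (int i = 0; i < nvec; i++)
            e[i].vector = 100 + i;    /* pretend IRQ numbers */
        return 0;
    }

    int main(void)
    {
        struct msix_entry entries[MAX_CPUS + 1];
        int num_cpus = 4, msix_enabled = 0;

        /* One entry per CPU event queue plus one for the MCC queue. */
        for (int i = 0; i <= num_cpus; i++)
            entries[i].entry = i;

        if (!fake_enable_msix(entries, num_cpus + 1))
            msix_enabled = 1;

        printf("msix_enabled=%d, vectors requested=%d\n",
               msix_enabled, num_cpus + 1);
        return 0;
    }
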
4477 
4478 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4479  const struct pci_device_id *id)
4480 {
4481  struct beiscsi_hba *phba = NULL;
4482  struct hwi_controller *phwi_ctrlr;
4483  struct hwi_context_memory *phwi_context;
4484  struct be_eq_obj *pbe_eq;
4485  int ret, num_cpus, i;
4486  u8 *real_offset = 0;
4487  u32 value = 0;
4488 
4489  ret = beiscsi_enable_pci(pcidev);
4490  if (ret < 0) {
4491  dev_err(&pcidev->dev,
4492  "beiscsi_dev_probe - Failed to enable pci device\n");
4493  return ret;
4494  }
4495 
4496  phba = beiscsi_hba_alloc(pcidev);
4497  if (!phba) {
4498  dev_err(&pcidev->dev,
4499  "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
4500  goto disable_pci;
4501  }
4502 
4503  /* Initialize driver configuration parameters */
4504  beiscsi_hba_attrs_init(phba);
4505 
4506  switch (pcidev->device) {
4507  case BE_DEVICE_ID1:
4508  case OC_DEVICE_ID1:
4509  case OC_DEVICE_ID2:
4510  phba->generation = BE_GEN2;
4511  break;
4512  case BE_DEVICE_ID2:
4513  case OC_DEVICE_ID3:
4514  phba->generation = BE_GEN3;
4515  break;
4516  default:
4517  phba->generation = 0;
4518  }
4519 
4520  if (enable_msix)
4521  num_cpus = find_num_cpus();
4522  else
4523  num_cpus = 1;
4524  phba->num_cpus = num_cpus;
4526  "BM_%d : num_cpus = %d\n",
4527  phba->num_cpus);
4528 
4529  if (enable_msix) {
4530  beiscsi_msix_enable(phba);
4531  if (!phba->msix_enabled)
4532  phba->num_cpus = 1;
4533  }
4534  ret = be_ctrl_init(phba, pcidev);
4535  if (ret) {
4537  "BM_%d : beiscsi_dev_probe-"
4538  "Failed in be_ctrl_init\n");
4539  goto hba_free;
4540  }
4541 
4542  if (!num_hba) {
4543  real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4544  value = readl((void *)real_offset);
4545  if (value & 0x00010000) {
4546  gcrashmode++;
4548  "BM_%d : Loading Driver in crashdump mode\n");
4549  ret = beiscsi_cmd_reset_function(phba);
4550  if (ret) {
4552  "BM_%d : Reset Failed. Aborting Crashdump\n");
4553  goto hba_free;
4554  }
4555  ret = be_chk_reset_complete(phba);
4556  if (ret) {
4558  "BM_%d : Failed to get out of reset."
4559  "Aborting Crashdump\n");
4560  goto hba_free;
4561  }
4562  } else {
4563  value |= 0x00010000;
4564  writel(value, (void *)real_offset);
4565  num_hba++;
4566  }
4567  }
4568 
4569  spin_lock_init(&phba->io_sgl_lock);
4570  spin_lock_init(&phba->mgmt_sgl_lock);
4571  spin_lock_init(&phba->isr_lock);
4572  ret = mgmt_get_fw_config(&phba->ctrl, phba);
4573  if (ret != 0) {
4575  "BM_%d : Error getting fw config\n");
4576  goto free_port;
4577  }
4578  phba->shost->max_id = phba->fw_config.iscsi_cid_count;
4579  beiscsi_get_params(phba);
4580  phba->shost->can_queue = phba->params.ios_per_ctrl;
4581  ret = beiscsi_init_port(phba);
4582  if (ret < 0) {
4584  "BM_%d : beiscsi_dev_probe-"
4585  "Failed in beiscsi_init_port\n");
4586  goto free_port;
4587  }
4588 
4589  for (i = 0; i < MAX_MCC_CMD ; i++) {
4590  init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4591  phba->ctrl.mcc_tag[i] = i + 1;
4592  phba->ctrl.mcc_numtag[i + 1] = 0;
4593  phba->ctrl.mcc_tag_available++;
4594  }
4595 
4596  phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4597 
4598  snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
4599  phba->shost->host_no);
4600  phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
4601  if (!phba->wq) {
4603  "BM_%d : beiscsi_dev_probe-"
4604  "Failed to allocate work queue\n");
4605  goto free_twq;
4606  }
4607 
4608  INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
4609 
4610  phwi_ctrlr = phba->phwi_ctrlr;
4611  phwi_context = phwi_ctrlr->phwi_ctxt;
4612  if (blk_iopoll_enabled) {
4613  for (i = 0; i < phba->num_cpus; i++) {
4614  pbe_eq = &phwi_context->be_eq[i];
4615  blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4616  be_iopoll);
4617  blk_iopoll_enable(&pbe_eq->iopoll);
4618  }
4619  }
4620  ret = beiscsi_init_irqs(phba);
4621  if (ret < 0) {
4623  "BM_%d : beiscsi_dev_probe-"
4624  "Failed to beiscsi_init_irqs\n");
4625  goto free_blkenbld;
4626  }
4627  hwi_enable_intr(phba);
4628 
4629  if (beiscsi_setup_boot_info(phba))
4630  /*
4631  * log error but continue, because we may not be using
4632  * iscsi boot.
4633  */
4635  "BM_%d : Could not set up "
4636  "iSCSI boot info.\n");
4637 
4640  "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
4641  return 0;
4642 
4643 free_blkenbld:
4644  destroy_workqueue(phba->wq);
4645  if (blk_iopoll_enabled)
4646  for (i = 0; i < phba->num_cpus; i++) {
4647  pbe_eq = &phwi_context->be_eq[i];
4648  blk_iopoll_disable(&pbe_eq->iopoll);
4649  }
4650 free_twq:
4651  beiscsi_clean_port(phba);
4652  beiscsi_free_mem(phba);
4653 free_port:
4654  real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4655 
4656  value = readl((void *)real_offset);
4657 
4658  if (value & 0x00010000) {
4659  value &= 0xfffeffff;
4660  writel(value, (void *)real_offset);
4661  }
4662 
4663  pci_free_consistent(phba->ctrl.pdev,
4664  phba->ctrl.mbox_mem_alloced.size,
4665  phba->ctrl.mbox_mem_alloced.va,
4666  phba->ctrl.mbox_mem_alloced.dma);
4667  beiscsi_unmap_pci_function(phba);
4668 hba_free:
4669  if (phba->msix_enabled)
4670  pci_disable_msix(phba->pcidev);
4671  iscsi_host_remove(phba->shost);
4672  pci_dev_put(phba->pcidev);
4673  iscsi_host_free(phba->shost);
4674 disable_pci:
4675  pci_disable_device(pcidev);
4676  return ret;
4677 }
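
beiscsi_dev_probe() unwinds failures through its ladder of labels (free_blkenbld, free_twq, free_port, hba_free, disable_pci), each releasing only what was acquired before the jump, in reverse order of acquisition. A minimal sketch of the idiom with hypothetical resources:

    #include <stdio.h>
    #include <stdlib.h>

    /* Each failure jumps to the label that unwinds only what has
     * already been acquired, in reverse order. */
    static int probe(char **a, char **b, char **c)
    {
        *a = malloc(16);
        if (!*a)
            goto fail;
        *b = malloc(16);
        if (!*b)
            goto free_a;
        *c = malloc(16);
        if (!*c)
            goto free_b;
        return 0;            /* success: everything stays acquired */

    free_b:
        free(*b);
    free_a:
        free(*a);
    fail:
        return -1;
    }

    int main(void)
    {
        char *a, *b, *c;
        if (probe(&a, &b, &c) == 0) {
            puts("probe ok");
            free(c); free(b); free(a);   /* the "remove" path */
        }
        return 0;
    }
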
4678 
4679 struct iscsi_transport beiscsi_iscsi_transport = {
4680  .owner = THIS_MODULE,
4681  .name = DRV_NAME,
4682  .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
4683  CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
4684  .create_session = beiscsi_session_create,
4685  .destroy_session = beiscsi_session_destroy,
4686  .create_conn = beiscsi_conn_create,
4687  .bind_conn = beiscsi_conn_bind,
4688  .destroy_conn = iscsi_conn_teardown,
4689  .attr_is_visible = be2iscsi_attr_is_visible,
4690  .set_iface_param = be2iscsi_iface_set_param,
4691  .get_iface_param = be2iscsi_iface_get_param,
4692  .set_param = beiscsi_set_param,
4693  .get_conn_param = iscsi_conn_get_param,
4694  .get_session_param = iscsi_session_get_param,
4695  .get_host_param = beiscsi_get_host_param,
4696  .start_conn = beiscsi_conn_start,
4697  .stop_conn = iscsi_conn_stop,
4698  .send_pdu = iscsi_conn_send_pdu,
4699  .xmit_task = beiscsi_task_xmit,
4700  .cleanup_task = beiscsi_cleanup_task,
4701  .alloc_pdu = beiscsi_alloc_pdu,
4702  .parse_pdu_itt = beiscsi_parse_pdu,
4703  .get_stats = beiscsi_conn_get_stats,
4704  .get_ep_param = beiscsi_ep_get_param,
4705  .ep_connect = beiscsi_ep_connect,
4706  .ep_poll = beiscsi_ep_poll,
4707  .ep_disconnect = beiscsi_ep_disconnect,
4708  .session_recovery_timedout = iscsi_session_recovery_timedout,
4709  .bsg_request = beiscsi_bsg_request,
4710 };
4711 
4712 static struct pci_driver beiscsi_pci_driver = {
4713  .name = DRV_NAME,
4714  .probe = beiscsi_dev_probe,
4715  .remove = beiscsi_remove,
4716  .shutdown = beiscsi_shutdown,
4717  .id_table = beiscsi_pci_id_table
4718 };
4719 
4720 
4721 static int __init beiscsi_module_init(void)
4722 {
4723  int ret;
4724 
4725  beiscsi_scsi_transport =
4726  iscsi_register_transport(&beiscsi_iscsi_transport);
4727  if (!beiscsi_scsi_transport) {
4729  "beiscsi_module_init - Unable to register beiscsi transport.\n");
4730  return -ENOMEM;
4731  }
4732  printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
4733  &beiscsi_iscsi_transport);
4734 
4735  ret = pci_register_driver(&beiscsi_pci_driver);
4736  if (ret) {
4738  "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
4739  goto unregister_iscsi_transport;
4740  }
4741  return 0;
4742 
4743 unregister_iscsi_transport:
4744  iscsi_unregister_transport(&beiscsi_iscsi_transport);
4745  return ret;
4746 }
4747 
4748 static void __exit beiscsi_module_exit(void)
4749 {
4750  pci_unregister_driver(&beiscsi_pci_driver);
4751  iscsi_unregister_transport(&beiscsi_iscsi_transport);
4752 }
4753 
4754 module_init(beiscsi_module_init);
4755 module_exit(beiscsi_module_exit);
4756