Linux Kernel  3.7.1
bnx2i_iscsi.c
1 /*
2  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3  *
4  * Copyright (c) 2006 - 2012 Broadcom Corporation
5  * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6  * Copyright (c) 2007, 2008 Mike Christie
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation.
11  *
12  * Written by: Anil Veerabhadrappa ([email protected])
13  * Maintained by: Eddie Wai ([email protected])
14  */
15 
16 #include <linux/slab.h>
17 #include <scsi/scsi_tcq.h>
18 #include <scsi/libiscsi.h>
19 #include "bnx2i.h"
20 
21 struct scsi_transport_template *bnx2i_scsi_xport_template;
22 struct iscsi_transport bnx2i_iscsi_transport;
23 static struct scsi_host_template bnx2i_host_template;
24 
25 /*
26  * Global endpoint resource info
27  */
28 static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
29 
30 DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
31 
32 static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
33 {
34  int retval = 0;
35 
36  if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
37      test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
38      test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
39  retval = -EPERM;
40  return retval;
41 }
42 
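/*
 * bnx2i_get_write_cmd_bd_idx - walks the command's BD table to find the BD
 * entry, and the offset within it, that correspond to 'buf_off' bytes into
 * the write buffer; results are returned via start_bd_off/start_bd_idx.
 */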
54 static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
55  u32 *start_bd_off, u32 *start_bd_idx)
56 {
57  struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
58  u32 cur_offset = 0;
59  u32 cur_bd_idx = 0;
60 
61  if (buf_off) {
62  while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
63  cur_offset += bd_tbl->buffer_length;
64  cur_bd_idx++;
65  bd_tbl++;
66  }
67  }
68 
69  *start_bd_off = buf_off - cur_offset;
70  *start_bd_idx = cur_bd_idx;
71 }
72 
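/*
 * bnx2i_setup_write_cmd_bd_info - computes the starting BD index and offset
 * for the unsolicited and the solicited (R2T-driven) portions of a write
 * command and records them in the command's request WQE.
 */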
82 static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
83 {
84  struct bnx2i_cmd *cmd = task->dd_data;
85  u32 start_bd_offset;
86  u32 start_bd_idx;
87  u32 buffer_offset = 0;
88  u32 cmd_len = cmd->req.total_data_transfer_length;
89 
90  /* if ImmediateData is turned off & InitialR2T is turned on,
91  * there will be no immediate or unsolicited data, just return.
92  */
93  if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
94  return;
95 
96  /* Immediate data */
97  buffer_offset += task->imm_count;
98  if (task->imm_count == cmd_len)
99  return;
100 
101  if (iscsi_task_has_unsol_data(task)) {
102  bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
103  &start_bd_offset, &start_bd_idx);
104  cmd->req.ud_buffer_offset = start_bd_offset;
105  cmd->req.ud_start_bd_index = start_bd_idx;
106  buffer_offset += task->unsol_r2t.data_length;
107  }
108 
109  if (buffer_offset != cmd_len) {
110  bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
111  &start_bd_offset, &start_bd_idx);
112  if ((start_bd_offset > task->conn->session->first_burst) ||
113  (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
114  int i = 0;
115 
117  "bnx2i- error, buf offset 0x%x "
118  "bd_valid %d use_sg %d\n",
119  buffer_offset, cmd->io_tbl.bd_valid,
120  scsi_sg_count(cmd->scsi_cmd));
121  for (i = 0; i < cmd->io_tbl.bd_valid; i++)
123  "bnx2i err, bd[%d]: len %x\n",
124  i, cmd->io_tbl.bd_tbl[i].\ buffer_length);
125  }
126  cmd->req.sd_buffer_offset = start_bd_offset;
127  cmd->req.sd_start_bd_index = start_bd_idx;
128  }
129 }
130 
131 
132 
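/*
 * bnx2i_map_scsi_sg - DMA-maps the SCSI command's scatter-gather list and
 * fills the command's BD table with the mapped addresses and lengths;
 * returns the number of BD entries used.
 */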
140 static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
141 {
142  struct scsi_cmnd *sc = cmd->scsi_cmd;
143  struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
144  struct scatterlist *sg;
145  int byte_count = 0;
146  int bd_count = 0;
147  int sg_count;
148  int sg_len;
149  u64 addr;
150  int i;
151 
152  BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
153 
154  sg_count = scsi_dma_map(sc);
155 
156  scsi_for_each_sg(sc, sg, sg_count, i) {
157  sg_len = sg_dma_len(sg);
158  addr = (u64) sg_dma_address(sg);
159  bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
160  bd[bd_count].buffer_addr_hi = addr >> 32;
161  bd[bd_count].buffer_length = sg_len;
162  bd[bd_count].flags = 0;
163  if (bd_count == 0)
164  bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
165 
166  byte_count += sg_len;
167  bd_count++;
168  }
169 
170  if (bd_count)
171  bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
172 
173  BUG_ON(byte_count != scsi_bufflen(sc));
174  return bd_count;
175 }
176 
183 static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
184 {
185  int bd_count;
186 
187  bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
188  if (!bd_count) {
189  struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
190 
191  bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
192  bd[0].buffer_length = bd[0].flags = 0;
193  }
194  cmd->io_tbl.bd_valid = bd_count;
195 }
196 
197 
204 void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
205 {
206  struct scsi_cmnd *sc = cmd->scsi_cmd;
207 
208  if (cmd->io_tbl.bd_valid && sc) {
209  scsi_dma_unmap(sc);
210  cmd->io_tbl.bd_valid = 0;
211  }
212 }
213 
214 static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
215 {
216  memset(&cmd->req, 0x00, sizeof(cmd->req));
217  cmd->req.op_code = 0xFF;
218  cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
219  cmd->req.bd_list_addr_hi =
220  (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
221 
222 }
223 
224 
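/*
 * bnx2i_bind_conn_to_iscsi_cid - records the connection in the hba's
 * conn<->cid table for the given iscsi cid; returns -EBUSY if the entry
 * is already in use.
 */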
235 static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
236  struct bnx2i_conn *bnx2i_conn,
237  u32 iscsi_cid)
238 {
239  if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
240  iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
241  "conn bind - entry #%d not free\n", iscsi_cid);
242  return -EBUSY;
243  }
244 
245  hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
246  return 0;
247 }
248 
249 
255 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
256  u16 iscsi_cid)
257 {
258  if (!hba->cid_que.conn_cid_tbl) {
259  printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
260  return NULL;
261 
262  } else if (iscsi_cid >= hba->max_active_conns) {
263  printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
264  return NULL;
265  }
266  return hba->cid_que.conn_cid_tbl[iscsi_cid];
267 }
268 
269 
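/*
 * bnx2i_alloc_iscsi_cid - allocates the next free iscsi cid from the hba's
 * cid queue, or returns -1 when none is available.
 */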
274 static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
275 {
276  int idx;
277 
278  if (!hba->cid_que.cid_free_cnt)
279  return -1;
280 
281  idx = hba->cid_que.cid_q_cons_idx;
282  hba->cid_que.cid_q_cons_idx++;
283  if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
284  hba->cid_que.cid_q_cons_idx = 0;
285 
286  hba->cid_que.cid_free_cnt--;
287  return hba->cid_que.cid_que[idx];
288 }
289 
290 
296 static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
297 {
298  int idx;
299 
300  if (iscsi_cid == (u16) -1)
301  return;
302 
303  hba->cid_que.cid_free_cnt++;
304 
305  idx = hba->cid_que.cid_q_prod_idx;
306  hba->cid_que.cid_que[idx] = iscsi_cid;
307  hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
308  hba->cid_que.cid_q_prod_idx++;
309  if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
310  hba->cid_que.cid_q_prod_idx = 0;
311 }
312 
313 
321 static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
322 {
323  int mem_size;
324  int i;
325 
326  mem_size = hba->max_active_conns * sizeof(u32);
327  mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
328 
329  hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
330  if (!hba->cid_que.cid_que_base)
331  return -ENOMEM;
332 
333  mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
334  mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
335  hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
336  if (!hba->cid_que.conn_cid_tbl) {
337  kfree(hba->cid_que.cid_que_base);
338  hba->cid_que.cid_que_base = NULL;
339  return -ENOMEM;
340  }
341 
342  hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
343  hba->cid_que.cid_q_prod_idx = 0;
344  hba->cid_que.cid_q_cons_idx = 0;
345  hba->cid_que.cid_q_max_idx = hba->max_active_conns;
346  hba->cid_que.cid_free_cnt = hba->max_active_conns;
347 
348  for (i = 0; i < hba->max_active_conns; i++) {
349  hba->cid_que.cid_que[i] = i;
350  hba->cid_que.conn_cid_tbl[i] = NULL;
351  }
352  return 0;
353 }
354 
355 
360 static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
361 {
362  kfree(hba->cid_que.cid_que_base);
363  hba->cid_que.cid_que_base = NULL;
364 
365  kfree(hba->cid_que.conn_cid_tbl);
366  hba->cid_que.conn_cid_tbl = NULL;
367 }
368 
369 
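/*
 * bnx2i_alloc_ep - allocates an iscsi endpoint (with bnx2i private data)
 * for this hba and initializes it to the idle state.
 */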
378 static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
379 {
380  struct iscsi_endpoint *ep;
381  struct bnx2i_endpoint *bnx2i_ep;
382  u32 ec_div;
383 
384  ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
385  if (!ep) {
386  printk(KERN_ERR "bnx2i: Could not allocate ep\n");
387  return NULL;
388  }
389 
390  bnx2i_ep = ep->dd_data;
391  bnx2i_ep->cls_ep = ep;
392  INIT_LIST_HEAD(&bnx2i_ep->link);
393  bnx2i_ep->state = EP_STATE_IDLE;
394  bnx2i_ep->ep_iscsi_cid = (u16) -1;
395  bnx2i_ep->hba = hba;
396  bnx2i_ep->hba_age = hba->age;
397 
398  ec_div = event_coal_div;
399  while (ec_div >>= 1)
400  bnx2i_ep->ec_shift += 1;
401 
402  hba->ofld_conns_active++;
403  init_waitqueue_head(&bnx2i_ep->ofld_wait);
404  return ep;
405 }
406 
407 
412 static void bnx2i_free_ep(struct iscsi_endpoint *ep)
413 {
414  struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
415  unsigned long flags;
416 
417  spin_lock_irqsave(&bnx2i_resc_lock, flags);
418  bnx2i_ep->state = EP_STATE_IDLE;
419  bnx2i_ep->hba->ofld_conns_active--;
420 
421  if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
422  bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
423 
424  if (bnx2i_ep->conn) {
425  bnx2i_ep->conn->ep = NULL;
426  bnx2i_ep->conn = NULL;
427  }
428 
429  bnx2i_ep->hba = NULL;
430  spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
431  iscsi_destroy_endpoint(ep);
432 }
433 
434 
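/*
 * bnx2i_alloc_bdt - allocates a DMA-coherent buffer descriptor table
 * (ISCSI_MAX_BDS_PER_CMD entries) for one command of the session.
 */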
441 static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
442  struct bnx2i_cmd *cmd)
443 {
444  struct io_bdt *io = &cmd->io_tbl;
445  struct iscsi_bd *bd;
446 
447  io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
448  ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
449  &io->bd_tbl_dma, GFP_KERNEL);
450  if (!io->bd_tbl) {
451  iscsi_session_printk(KERN_ERR, session, "Could not "
452  "allocate bdt.\n");
453  return -ENOMEM;
454  }
455  io->bd_valid = 0;
456  return 0;
457 }
458 
465 static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
466  struct iscsi_session *session)
467 {
468  int i;
469 
470  for (i = 0; i < session->cmds_max; i++) {
471  struct iscsi_task *task = session->cmds[i];
472  struct bnx2i_cmd *cmd = task->dd_data;
473 
474  if (cmd->io_tbl.bd_tbl)
475  dma_free_coherent(&hba->pcidev->dev,
476  ISCSI_MAX_BDS_PER_CMD *
477  sizeof(struct iscsi_bd),
478  cmd->io_tbl.bd_tbl,
479  cmd->io_tbl.bd_tbl_dma);
480  }
481 
482 }
483 
484 
490 static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
491  struct iscsi_session *session)
492 {
493  int i;
494 
495  for (i = 0; i < session->cmds_max; i++) {
496  struct iscsi_task *task = session->cmds[i];
497  struct bnx2i_cmd *cmd = task->dd_data;
498 
499  task->hdr = &cmd->hdr;
500  task->hdr_max = sizeof(struct iscsi_hdr);
501 
502  if (bnx2i_alloc_bdt(hba, session, cmd))
503  goto free_bdts;
504  }
505 
506  return 0;
507 
508 free_bdts:
509  bnx2i_destroy_cmd_pool(hba, session);
510  return -ENOMEM;
511 }
512 
513 
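/*
 * bnx2i_setup_mp_bdt - allocates the middle-path BD table and the dummy
 * buffer page used for middle-path (mgmt) requests.
 */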
521 static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
522 {
523  int rc = 0;
524  struct iscsi_bd *mp_bdt;
525  u64 addr;
526 
527  hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
528  &hba->mp_bd_dma, GFP_KERNEL);
529  if (!hba->mp_bd_tbl) {
530  printk(KERN_ERR "unable to allocate Middle Path BDT\n");
531  rc = -1;
532  goto out;
533  }
534 
535  hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
536  &hba->dummy_buf_dma, GFP_KERNEL);
537  if (!hba->dummy_buffer) {
538  printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
539  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
540  hba->mp_bd_tbl, hba->mp_bd_dma);
541  hba->mp_bd_tbl = NULL;
542  rc = -1;
543  goto out;
544  }
545 
546  mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
547  addr = (unsigned long) hba->dummy_buf_dma;
548  mp_bdt->buffer_addr_lo = addr & 0xffffffff;
549  mp_bdt->buffer_addr_hi = addr >> 32;
550  mp_bdt->buffer_length = PAGE_SIZE;
551  mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
552  ISCSI_BD_FIRST_IN_BD_CHAIN;
553 out:
554  return rc;
555 }
556 
557 
564 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
565 {
566  if (hba->mp_bd_tbl) {
567  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
568  hba->mp_bd_tbl, hba->mp_bd_dma);
569  hba->mp_bd_tbl = NULL;
570  }
571  if (hba->dummy_buffer) {
572  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
573  hba->dummy_buffer, hba->dummy_buf_dma);
574  hba->dummy_buffer = NULL;
575  }
576  return;
577 }
578 
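/*
 * bnx2i_drop_session - forces a session failure so that iscsid drops and
 * recovers the session.
 */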
590 void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
591 {
592  iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
593 }
594 
602 static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
603  struct bnx2i_endpoint *ep)
604 {
605  write_lock_bh(&hba->ep_rdwr_lock);
606  list_add_tail(&ep->link, &hba->ep_destroy_list);
607  write_unlock_bh(&hba->ep_rdwr_lock);
608  return 0;
609 }
610 
619 static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
620  struct bnx2i_endpoint *ep)
621 {
622  write_lock_bh(&hba->ep_rdwr_lock);
623  list_del_init(&ep->link);
624  write_unlock_bh(&hba->ep_rdwr_lock);
625 
626  return 0;
627 }
628 
636 static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
637  struct bnx2i_endpoint *ep)
638 {
639  write_lock_bh(&hba->ep_rdwr_lock);
640  list_add_tail(&ep->link, &hba->ep_ofld_list);
641  write_unlock_bh(&hba->ep_rdwr_lock);
642  return 0;
643 }
644 
652 static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
653  struct bnx2i_endpoint *ep)
654 {
655  write_lock_bh(&hba->ep_rdwr_lock);
656  list_del_init(&ep->link);
657  write_unlock_bh(&hba->ep_rdwr_lock);
658  return 0;
659 }
660 
661 
669 struct bnx2i_endpoint *
670 bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
671 {
672  struct list_head *list;
673  struct list_head *tmp;
674  struct bnx2i_endpoint *ep;
675 
676  read_lock_bh(&hba->ep_rdwr_lock);
677  list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
678  ep = (struct bnx2i_endpoint *)list;
679 
680  if (ep->ep_iscsi_cid == iscsi_cid)
681  break;
682  ep = NULL;
683  }
684  read_unlock_bh(&hba->ep_rdwr_lock);
685 
686  if (!ep)
687  printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
688  return ep;
689 }
690 
697 struct bnx2i_endpoint *
698 bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
699 {
700  struct list_head *list;
701  struct list_head *tmp;
702  struct bnx2i_endpoint *ep;
703 
704  read_lock_bh(&hba->ep_rdwr_lock);
705  list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
706  ep = (struct bnx2i_endpoint *)list;
707 
708  if (ep->ep_iscsi_cid == iscsi_cid)
709  break;
710  ep = NULL;
711  }
712  read_unlock_bh(&hba->ep_rdwr_lock);
713 
714  if (!ep)
715  printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
716 
717  return ep;
718 }
719 
727 static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
728  struct bnx2i_endpoint *ep)
729 {
730  write_lock_bh(&hba->ep_rdwr_lock);
731  list_add_tail(&ep->link, &hba->ep_active_list);
732  write_unlock_bh(&hba->ep_rdwr_lock);
733 }
734 
735 
743 static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
744  struct bnx2i_endpoint *ep)
745 {
746  write_lock_bh(&hba->ep_rdwr_lock);
747  list_del_init(&ep->link);
748  write_unlock_bh(&hba->ep_rdwr_lock);
749 }
750 
751 
761 static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
762  struct Scsi_Host *shost)
763 {
766  else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
768  else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
770  else
772 }
773 
774 
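/*
 * bnx2i_alloc_hba - allocates and initializes the adapter (Scsi_Host plus
 * bnx2i_hba) for a cnic device: PCI info, register mapping, middle-path
 * BDT, cid queue, queue sizes and teardown timeouts.
 */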
782 struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
783 {
784  struct Scsi_Host *shost;
785  struct bnx2i_hba *hba;
786 
787  shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
788  if (!shost)
789  return NULL;
790  shost->dma_boundary = cnic->pcidev->dma_mask;
793  shost->max_channel = 0;
794  shost->max_lun = 512;
795  shost->max_cmd_len = 16;
796 
797  hba = iscsi_host_priv(shost);
798  hba->shost = shost;
799  hba->netdev = cnic->netdev;
800  /* Get PCI related information and update hba struct members */
801  hba->pcidev = cnic->pcidev;
802  pci_dev_get(hba->pcidev);
803  hba->pci_did = hba->pcidev->device;
804  hba->pci_vid = hba->pcidev->vendor;
805  hba->pci_sdid = hba->pcidev->subsystem_device;
806  hba->pci_svid = hba->pcidev->subsystem_vendor;
807  hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
808  hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
809 
810  bnx2i_identify_device(hba);
811  bnx2i_setup_host_queue_size(hba, shost);
812 
813  hba->reg_base = pci_resource_start(hba->pcidev, 0);
814  if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
815  hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
816  if (!hba->regview)
817  goto ioreg_map_err;
818  } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
819  hba->regview = pci_iomap(hba->pcidev, 0, 4096);
820  if (!hba->regview)
821  goto ioreg_map_err;
822  }
823 
824  if (bnx2i_setup_mp_bdt(hba))
825  goto mp_bdt_mem_err;
826 
827  INIT_LIST_HEAD(&hba->ep_ofld_list);
828  INIT_LIST_HEAD(&hba->ep_active_list);
829  INIT_LIST_HEAD(&hba->ep_destroy_list);
830  rwlock_init(&hba->ep_rdwr_lock);
831 
832  hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
833 
834  /* different values for 5708/5709/57710 */
835  hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
836 
837  if (bnx2i_setup_free_cid_que(hba))
838  goto cid_que_err;
839 
840  /* SQ/RQ/CQ size can be changed via sysfs interface */
841  if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
842  if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
843  hba->max_sqes = sq_size;
844  else
845  hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
846  } else { /* 5706/5708/5709 */
847  if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
848  hba->max_sqes = sq_size;
849  else
850  hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
851  }
852 
853  hba->max_rqes = rq_size;
854  hba->max_cqes = hba->max_sqes + rq_size;
855  if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
856  if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
857  hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
858  } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
859  hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
860 
861  hba->num_ccell = hba->max_sqes / 2;
862 
863  spin_lock_init(&hba->lock);
864  mutex_init(&hba->net_dev_lock);
865  init_waitqueue_head(&hba->eh_wait);
866  if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
867  hba->hba_shutdown_tmo = 30 * HZ;
868  hba->conn_teardown_tmo = 20 * HZ;
869  hba->conn_ctx_destroy_tmo = 6 * HZ;
870  } else { /* 5706/5708/5709 */
871  hba->hba_shutdown_tmo = 20 * HZ;
872  hba->conn_teardown_tmo = 10 * HZ;
873  hba->conn_ctx_destroy_tmo = 2 * HZ;
874  }
875 
876 #ifdef CONFIG_32BIT
877  spin_lock_init(&hba->stat_lock);
878 #endif
879  memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
880 
881  if (iscsi_host_add(shost, &hba->pcidev->dev))
882  goto free_dump_mem;
883  return hba;
884 
885 free_dump_mem:
886  bnx2i_release_free_cid_que(hba);
887 cid_que_err:
888  bnx2i_free_mp_bdt(hba);
889 mp_bdt_mem_err:
890  if (hba->regview) {
891  pci_iounmap(hba->pcidev, hba->regview);
892  hba->regview = NULL;
893  }
894 ioreg_map_err:
895  pci_dev_put(hba->pcidev);
896  scsi_host_put(shost);
897  return NULL;
898 }
899 
906 void bnx2i_free_hba(struct bnx2i_hba *hba)
907 {
908  struct Scsi_Host *shost = hba->shost;
909 
910  iscsi_host_remove(shost);
911  INIT_LIST_HEAD(&hba->ep_ofld_list);
912  INIT_LIST_HEAD(&hba->ep_active_list);
913  INIT_LIST_HEAD(&hba->ep_destroy_list);
914  pci_dev_put(hba->pcidev);
915 
916  if (hba->regview) {
917  pci_iounmap(hba->pcidev, hba->regview);
918  hba->regview = NULL;
919  }
920  bnx2i_free_mp_bdt(hba);
921  bnx2i_release_free_cid_que(hba);
922  iscsi_host_free(shost);
923 }
924 
932 static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
933  struct bnx2i_conn *bnx2i_conn)
934 {
935  if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
936  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
937  bnx2i_conn->gen_pdu.resp_bd_tbl,
938  bnx2i_conn->gen_pdu.resp_bd_dma);
939  bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
940  }
941 
942  if (bnx2i_conn->gen_pdu.req_bd_tbl) {
943  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
944  bnx2i_conn->gen_pdu.req_bd_tbl,
945  bnx2i_conn->gen_pdu.req_bd_dma);
946  bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
947  }
948 
949  if (bnx2i_conn->gen_pdu.resp_buf) {
950  dma_free_coherent(&hba->pcidev->dev,
951  ISCSI_DEF_MAX_RECV_SEG_LEN,
952  bnx2i_conn->gen_pdu.resp_buf,
953  bnx2i_conn->gen_pdu.resp_dma_addr);
954  bnx2i_conn->gen_pdu.resp_buf = NULL;
955  }
956 
957  if (bnx2i_conn->gen_pdu.req_buf) {
958  dma_free_coherent(&hba->pcidev->dev,
959  ISCSI_DEF_MAX_RECV_SEG_LEN,
960  bnx2i_conn->gen_pdu.req_buf,
961  bnx2i_conn->gen_pdu.req_dma_addr);
962  bnx2i_conn->gen_pdu.req_buf = NULL;
963  }
964 }
965 
973 static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
974  struct bnx2i_conn *bnx2i_conn)
975 {
976  /* Allocate memory for login request/response buffers */
977  bnx2i_conn->gen_pdu.req_buf =
978  dma_alloc_coherent(&hba->pcidev->dev,
979  ISCSI_DEF_MAX_RECV_SEG_LEN,
980  &bnx2i_conn->gen_pdu.req_dma_addr,
981  GFP_KERNEL);
982  if (bnx2i_conn->gen_pdu.req_buf == NULL)
983  goto login_req_buf_failure;
984 
985  bnx2i_conn->gen_pdu.req_buf_size = 0;
986  bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
987 
988  bnx2i_conn->gen_pdu.resp_buf =
989  dma_alloc_coherent(&hba->pcidev->dev,
990  ISCSI_DEF_MAX_RECV_SEG_LEN,
991  &bnx2i_conn->gen_pdu.resp_dma_addr,
992  GFP_KERNEL);
993  if (bnx2i_conn->gen_pdu.resp_buf == NULL)
994  goto login_resp_buf_failure;
995 
996  bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
997  bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
998 
999  bnx2i_conn->gen_pdu.req_bd_tbl =
1000  dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
1001  &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
1002  if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
1003  goto login_req_bd_tbl_failure;
1004 
1005  bnx2i_conn->gen_pdu.resp_bd_tbl =
1006  dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
1007  &bnx2i_conn->gen_pdu.resp_bd_dma,
1008  GFP_KERNEL);
1009  if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
1010  goto login_resp_bd_tbl_failure;
1011 
1012  return 0;
1013 
1014 login_resp_bd_tbl_failure:
1015  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1016  bnx2i_conn->gen_pdu.req_bd_tbl,
1017  bnx2i_conn->gen_pdu.req_bd_dma);
1018  bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
1019 
1020 login_req_bd_tbl_failure:
1021  dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1022  bnx2i_conn->gen_pdu.resp_buf,
1023  bnx2i_conn->gen_pdu.resp_dma_addr);
1024  bnx2i_conn->gen_pdu.resp_buf = NULL;
1025 login_resp_buf_failure:
1026  dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1027  bnx2i_conn->gen_pdu.req_buf,
1028  bnx2i_conn->gen_pdu.req_dma_addr);
1029  bnx2i_conn->gen_pdu.req_buf = NULL;
1030 login_req_buf_failure:
1031  iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
1032  "login resource alloc failed!!\n");
1033  return -ENOMEM;
1034 
1035 }
1036 
1037 
1045 static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
1046 {
1047  struct iscsi_bd *bd_tbl;
1048 
1049  bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
1050 
1051  bd_tbl->buffer_addr_hi =
1052  (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
1053  bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
1054  bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
1055  bnx2i_conn->gen_pdu.req_buf;
1056  bd_tbl->reserved0 = 0;
1057  bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1058  ISCSI_BD_FIRST_IN_BD_CHAIN;
1059 
1060  bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1061  bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1062  bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1063  bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1064  bd_tbl->reserved0 = 0;
1065  bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1066  ISCSI_BD_FIRST_IN_BD_CHAIN;
1067 }
1068 
1069 
1077 static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1078 {
1079  struct bnx2i_cmd *cmd = task->dd_data;
1080  struct bnx2i_conn *bnx2i_conn = cmd->conn;
1081  int rc = 0;
1082  char *buf;
1083  int data_len;
1084 
1085  bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1086  switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1087  case ISCSI_OP_LOGIN:
1088  bnx2i_send_iscsi_login(bnx2i_conn, task);
1089  break;
1090  case ISCSI_OP_NOOP_OUT:
1091  data_len = bnx2i_conn->gen_pdu.req_buf_size;
1092  buf = bnx2i_conn->gen_pdu.req_buf;
1093  if (data_len)
1094  rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1095  buf, data_len, 1);
1096  else
1097  rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1098  NULL, 0, 1);
1099  break;
1100  case ISCSI_OP_LOGOUT:
1101  rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1102  break;
1103  case ISCSI_OP_SCSI_TMFUNC:
1104  rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1105  break;
1106  case ISCSI_OP_TEXT:
1107  rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
1108  break;
1109  default:
1110  iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1111  "send_gen: unsupported op 0x%x\n",
1112  task->hdr->opcode);
1113  }
1114  return rc;
1115 }
1116 
1117 
1118 /**********************************************************************
1119  * SCSI-ML Interface
1120  **********************************************************************/
1121 
1127 static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1128 {
1129  u32 dword;
1130  int lpcnt;
1131  u8 *srcp;
1132  u32 *dstp;
1133  u32 scsi_lun[2];
1134 
1135  int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1136  cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1137  cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1138 
1139  lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1140  srcp = (u8 *) sc->cmnd;
1141  dstp = (u32 *) cmd->req.cdb;
1142  while (lpcnt--) {
1143  memcpy(&dword, (const void *) srcp, 4);
1144  *dstp = cpu_to_be32(dword);
1145  srcp += 4;
1146  dstp++;
1147  }
1148  if (sc->cmd_len & 0x3) {
1149  dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1150  *dstp = cpu_to_be32(dword);
1151  }
1152 }
1153 
1154 static void bnx2i_cleanup_task(struct iscsi_task *task)
1155 {
1156  struct iscsi_conn *conn = task->conn;
1157  struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1158  struct bnx2i_hba *hba = bnx2i_conn->hba;
1159 
1160  /*
1161  * mgmt task or cmd was never sent to us to transmit.
1162  */
1163  if (!task->sc || task->state == ISCSI_TASK_PENDING)
1164  return;
1165  /*
1166  * need to clean-up task context to claim dma buffers
1167  */
1168  if (task->state == ISCSI_TASK_ABRT_TMF) {
1169  bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1170 
1171  spin_unlock_bh(&conn->session->lock);
1172  wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1173  msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1174  spin_lock_bh(&conn->session->lock);
1175  }
1176  bnx2i_iscsi_unmap_sg_list(task->dd_data);
1177 }
1178 
1184 static int
1185 bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1186 {
1187  struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1188  struct bnx2i_hba *hba = bnx2i_conn->hba;
1189  struct bnx2i_cmd *cmd = task->dd_data;
1190 
1191  memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1192 
1193  bnx2i_setup_cmd_wqe_template(cmd);
1194  bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1195 
1196  /* Tx PDU/data length count */
1197  ADD_STATS_64(hba, tx_pdus, 1);
1198  ADD_STATS_64(hba, tx_bytes, task->data_count);
1199 
1200  if (task->data_count) {
1201  memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1202  task->data_count);
1203  bnx2i_conn->gen_pdu.req_wr_ptr =
1204  bnx2i_conn->gen_pdu.req_buf + task->data_count;
1205  }
1206  cmd->conn = conn->dd_data;
1207  cmd->scsi_cmd = NULL;
1208  return bnx2i_iscsi_send_generic_request(task);
1209 }
1210 
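/*
 * bnx2i_task_xmit - transmits an iscsi task; mgmt tasks are routed through
 * bnx2i_mtask_xmit(), while SCSI commands are mapped and posted to the
 * hardware via bnx2i_send_iscsi_scsicmd().
 */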
1217 static int bnx2i_task_xmit(struct iscsi_task *task)
1218 {
1219  struct iscsi_conn *conn = task->conn;
1220  struct iscsi_session *session = conn->session;
1221  struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1222  struct bnx2i_hba *hba = iscsi_host_priv(shost);
1223  struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1224  struct scsi_cmnd *sc = task->sc;
1225  struct bnx2i_cmd *cmd = task->dd_data;
1226  struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
1227 
1228  if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
1229  hba->max_sqes)
1230  return -ENOMEM;
1231 
1232  /*
1233  * If there is no scsi_cmnd this must be a mgmt task
1234  */
1235  if (!sc)
1236  return bnx2i_mtask_xmit(conn, task);
1237 
1238  bnx2i_setup_cmd_wqe_template(cmd);
1239  cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1240  cmd->conn = bnx2i_conn;
1241  cmd->scsi_cmd = sc;
1242  cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1243  cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1244 
1245  bnx2i_iscsi_map_sg_list(cmd);
1246  bnx2i_cpy_scsi_cdb(sc, cmd);
1247 
1248  cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1249  if (sc->sc_data_direction == DMA_TO_DEVICE) {
1250  cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1251  cmd->req.itt = task->itt |
1252  (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1253  bnx2i_setup_write_cmd_bd_info(task);
1254  } else {
1255  if (scsi_bufflen(sc))
1256  cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1257  cmd->req.itt = task->itt |
1258  (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1259  }
1260 
1261  cmd->req.num_bds = cmd->io_tbl.bd_valid;
1262  if (!cmd->io_tbl.bd_valid) {
1263  cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1264  cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1265  cmd->req.num_bds = 1;
1266  }
1267 
1268  bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1269  return 0;
1270 }
1271 
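/*
 * bnx2i_session_create - creates an iscsi session over an offloaded
 * endpoint; cmds_max is clamped to the adapter's SQ limits and a
 * per-command BD table pool is set up.
 */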
1280 static struct iscsi_cls_session *
1281 bnx2i_session_create(struct iscsi_endpoint *ep,
1282  uint16_t cmds_max, uint16_t qdepth,
1283  uint32_t initial_cmdsn)
1284 {
1285  struct Scsi_Host *shost;
1286  struct iscsi_cls_session *cls_session;
1287  struct bnx2i_hba *hba;
1288  struct bnx2i_endpoint *bnx2i_ep;
1289 
1290  if (!ep) {
1291  printk(KERN_ERR "bnx2i: missing ep.\n");
1292  return NULL;
1293  }
1294 
1295  bnx2i_ep = ep->dd_data;
1296  shost = bnx2i_ep->hba->shost;
1297  hba = iscsi_host_priv(shost);
1298  if (bnx2i_adapter_ready(hba))
1299  return NULL;
1300 
1301  /*
1302  * user can override hw limit as long as it is within
1303  * the min/max.
1304  */
1305  if (cmds_max > hba->max_sqes)
1306  cmds_max = hba->max_sqes;
1307  else if (cmds_max < BNX2I_SQ_WQES_MIN)
1308  cmds_max = BNX2I_SQ_WQES_MIN;
1309 
1310  cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1311  cmds_max, 0, sizeof(struct bnx2i_cmd),
1312  initial_cmdsn, ISCSI_MAX_TARGET);
1313  if (!cls_session)
1314  return NULL;
1315 
1316  if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1317  goto session_teardown;
1318  return cls_session;
1319 
1320 session_teardown:
1321  iscsi_session_teardown(cls_session);
1322  return NULL;
1323 }
1324 
1325 
1333 static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1334 {
1335  struct iscsi_session *session = cls_session->dd_data;
1336  struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1337  struct bnx2i_hba *hba = iscsi_host_priv(shost);
1338 
1339  bnx2i_destroy_cmd_pool(hba, session);
1340  iscsi_session_teardown(cls_session);
1341 }
1342 
1343 
1351 static struct iscsi_cls_conn *
1352 bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1353 {
1354  struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1355  struct bnx2i_hba *hba = iscsi_host_priv(shost);
1356  struct bnx2i_conn *bnx2i_conn;
1357  struct iscsi_cls_conn *cls_conn;
1358  struct iscsi_conn *conn;
1359 
1360  cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1361  cid);
1362  if (!cls_conn)
1363  return NULL;
1364  conn = cls_conn->dd_data;
1365 
1366  bnx2i_conn = conn->dd_data;
1367  bnx2i_conn->cls_conn = cls_conn;
1368  bnx2i_conn->hba = hba;
1369 
1370  atomic_set(&bnx2i_conn->work_cnt, 0);
1371 
1372  /* 'ep' ptr will be assigned in bind() call */
1373  bnx2i_conn->ep = NULL;
1374  init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1375 
1376  if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1378  "conn_new: login resc alloc failed!!\n");
1379  goto free_conn;
1380  }
1381 
1382  return cls_conn;
1383 
1384 free_conn:
1385  iscsi_conn_teardown(cls_conn);
1386  return NULL;
1387 }
1388 
1401 static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1402  struct iscsi_cls_conn *cls_conn,
1403  uint64_t transport_fd, int is_leading)
1404 {
1405  struct iscsi_conn *conn = cls_conn->dd_data;
1406  struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1407  struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1408  struct bnx2i_hba *hba = iscsi_host_priv(shost);
1409  struct bnx2i_endpoint *bnx2i_ep;
1410  struct iscsi_endpoint *ep;
1411  int ret_code;
1412 
1413  ep = iscsi_lookup_endpoint(transport_fd);
1414  if (!ep)
1415  return -EINVAL;
1416  /*
1417  * Forcefully terminate all in progress connection recovery at the
1418  * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
1419  */
1420  if (bnx2i_adapter_ready(hba))
1421  return -EIO;
1422 
1423  bnx2i_ep = ep->dd_data;
1424  if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1425  (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
1426  /* Peer disconnect via FIN or RST */
1427  return -EINVAL;
1428 
1429  if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1430  return -EINVAL;
1431 
1432  if (bnx2i_ep->hba != hba) {
1433  /* Error - TCP connection does not belong to this device
1434  */
1436  "conn bind, ep=0x%p (%s) does not",
1437  bnx2i_ep, bnx2i_ep->hba->netdev->name);
1439  "belong to hba (%s)\n",
1440  hba->netdev->name);
1441  return -EEXIST;
1442  }
1443  bnx2i_ep->conn = bnx2i_conn;
1444  bnx2i_conn->ep = bnx2i_ep;
1445  bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1446  bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1447 
1448  ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1449  bnx2i_ep->ep_iscsi_cid);
1450 
1451  /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1452  * driver needs to explicitly replenish RQ index during setup.
1453  */
1454  if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1455  bnx2i_put_rq_buf(bnx2i_conn, 0);
1456 
1457  bnx2i_arm_cq_event_coalescing(bnx2i_ep, CNIC_ARM_CQE);
1458  return ret_code;
1459 }
1460 
1461 
1469 static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1470 {
1471  struct iscsi_conn *conn = cls_conn->dd_data;
1472  struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1473  struct Scsi_Host *shost;
1474  struct bnx2i_hba *hba;
1475  struct bnx2i_work *work, *tmp;
1476  unsigned cpu = 0;
1477  struct bnx2i_percpu_s *p;
1478 
1479  shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1480  hba = iscsi_host_priv(shost);
1481 
1482  bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1483 
1484  if (atomic_read(&bnx2i_conn->work_cnt)) {
1485  for_each_online_cpu(cpu) {
1486  p = &per_cpu(bnx2i_percpu, cpu);
1487  spin_lock_bh(&p->p_work_lock);
1488  list_for_each_entry_safe(work, tmp,
1489  &p->work_list, list) {
1490  if (work->session == conn->session &&
1491  work->bnx2i_conn == bnx2i_conn) {
1492  list_del_init(&work->list);
1493  kfree(work);
1494  if (!atomic_dec_and_test(
1495  &bnx2i_conn->work_cnt))
1496  break;
1497  }
1498  }
1499  spin_unlock_bh(&p->p_work_lock);
1500  }
1501  }
1502 
1503  iscsi_conn_teardown(cls_conn);
1504 }
1505 
1506 
1515 static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
1516  enum iscsi_param param, char *buf)
1517 {
1518  struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
1519  struct bnx2i_hba *hba = bnx2i_ep->hba;
1520  int len = -ENOTCONN;
1521 
1522  if (!hba)
1523  return -ENOTCONN;
1524 
1525  switch (param) {
1526  case ISCSI_PARAM_CONN_PORT:
1527  mutex_lock(&hba->net_dev_lock);
1528  if (bnx2i_ep->cm_sk)
1529  len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
1530  mutex_unlock(&hba->net_dev_lock);
1531  break;
1532  case ISCSI_PARAM_CONN_ADDRESS:
1533  mutex_lock(&hba->net_dev_lock);
1534  if (bnx2i_ep->cm_sk)
1535  len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
1536  mutex_unlock(&hba->net_dev_lock);
1537  break;
1538  default:
1539  return -ENOSYS;
1540  }
1541 
1542  return len;
1543 }
1544 
1551 static int bnx2i_host_get_param(struct Scsi_Host *shost,
1552  enum iscsi_host_param param, char *buf)
1553 {
1554  struct bnx2i_hba *hba = iscsi_host_priv(shost);
1555  int len = 0;
1556 
1557  switch (param) {
1558  case ISCSI_HOST_PARAM_HWADDRESS:
1559  len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1560  break;
1561  case ISCSI_HOST_PARAM_NETDEV_NAME:
1562  len = sprintf(buf, "%s\n", hba->netdev->name);
1563  break;
1564  case ISCSI_HOST_PARAM_IPADDRESS: {
1565  struct list_head *active_list = &hba->ep_active_list;
1566 
1567  read_lock_bh(&hba->ep_rdwr_lock);
1568  if (!list_empty(&hba->ep_active_list)) {
1569  struct bnx2i_endpoint *bnx2i_ep;
1570  struct cnic_sock *csk;
1571 
1572  bnx2i_ep = list_first_entry(active_list,
1573  struct bnx2i_endpoint,
1574  link);
1575  csk = bnx2i_ep->cm_sk;
1576  if (test_bit(SK_F_IPV6, &csk->flags))
1577  len = sprintf(buf, "%pI6\n", csk->src_ip);
1578  else
1579  len = sprintf(buf, "%pI4\n", csk->src_ip);
1580  }
1581  read_unlock_bh(&hba->ep_rdwr_lock);
1582  break;
1583  }
1584  default:
1585  return iscsi_host_get_param(shost, param, buf);
1586  }
1587  return len;
1588 }
1589 
1596 static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1597 {
1598  struct iscsi_conn *conn = cls_conn->dd_data;
1599  struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1600 
1601  bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1602  bnx2i_update_iscsi_conn(conn);
1603 
1604  /*
1605  * this should normally not sleep for a long time so it should
1606  * not disrupt the caller.
1607  */
1608  bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1609  bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1610  bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
1611  add_timer(&bnx2i_conn->ep->ofld_timer);
1612  /* update iSCSI context for this conn, wait for CNIC to complete */
1613  wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1614  bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1615 
1616  if (signal_pending(current))
1617  flush_signals(current);
1618  del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1619 
1620  iscsi_conn_start(cls_conn);
1621  return 0;
1622 }
1623 
1624 
1630 static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1631  struct iscsi_stats *stats)
1632 {
1633  struct iscsi_conn *conn = cls_conn->dd_data;
1634 
1635  stats->txdata_octets = conn->txdata_octets;
1636  stats->rxdata_octets = conn->rxdata_octets;
1637  stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1638  stats->dataout_pdus = conn->dataout_pdus_cnt;
1639  stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1640  stats->datain_pdus = conn->datain_pdus_cnt;
1641  stats->r2t_pdus = conn->r2t_pdus_cnt;
1642  stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1643  stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1644  stats->custom_length = 3;
1645  strcpy(stats->custom[2].desc, "eh_abort_cnt");
1646  stats->custom[2].value = conn->eh_abort_cnt;
1647  stats->digest_err = 0;
1648  stats->timeout_err = 0;
1649  stats->custom_length = 0;
1650 }
1651 
1652 
1659 static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1660 {
1661  struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1662  struct bnx2i_hba *hba;
1663  struct cnic_dev *cnic = NULL;
1664 
1665  hba = get_adapter_list_head();
1666  if (hba && hba->cnic)
1667  cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1668  if (!cnic) {
1669  printk(KERN_ALERT "bnx2i: no route,"
1670  "can't connect using cnic\n");
1671  goto no_nx2_route;
1672  }
1673  hba = bnx2i_find_hba_for_cnic(cnic);
1674  if (!hba)
1675  goto no_nx2_route;
1676 
1677  if (bnx2i_adapter_ready(hba)) {
1678  printk(KERN_ALERT "bnx2i: check route, hba not found\n");
1679  goto no_nx2_route;
1680  }
1681  if (hba->netdev->mtu > hba->mtu_supported) {
1682  printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1683  hba->netdev->name, hba->netdev->mtu);
1684  printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1685  hba->mtu_supported);
1686  goto no_nx2_route;
1687  }
1688  return hba;
1689 no_nx2_route:
1690  return NULL;
1691 }
1692 
1693 
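/*
 * bnx2i_tear_down_conn - destroys the offloaded iscsi connection context in
 * the hardware and waits (bounded by a timer) for the destroy to complete.
 */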
1701 static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1702  struct bnx2i_endpoint *ep)
1703 {
1704  if (test_bit(CNIC_F_BNX2X_CLASS, &hba->cnic->flags) && ep->cm_sk)
1705  hba->cnic->cm_destroy(ep->cm_sk);
1706 
1707  if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type) &&
1708  ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1709  if (ep->conn && ep->conn->cls_conn &&
1710  ep->conn->cls_conn->dd_data) {
1711  struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
1712 
1713  /* Must suspend all rx queue activity for this ep */
1714  set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1715  }
1716  /* CONN_DISCONNECT timeout may or may not be an issue depending
1717  * on what transpired in the TCP layer, different targets behave
1718  * differently
1719  */
1720  printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
1721  "please submit GRC Dump, NW/PCIe trace, "
1722  "driver msgs to developers for analysis\n",
1723  hba->netdev->name);
1724  }
1725 
1726  ep->state = EP_STATE_CLEANUP_START;
1727  init_timer(&ep->ofld_timer);
1728  ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
1729  ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1730  ep->ofld_timer.data = (unsigned long) ep;
1731  add_timer(&ep->ofld_timer);
1732 
1733  bnx2i_ep_destroy_list_add(hba, ep);
1734 
1735  /* destroy iSCSI context, wait for it to complete */
1736  if (bnx2i_send_conn_destroy(hba, ep))
1737  ep->state = EP_STATE_CLEANUP_CMPL;
1738 
1739  wait_event_interruptible(ep->ofld_wait,
1740  (ep->state != EP_STATE_CLEANUP_START));
1741 
1742  if (signal_pending(current))
1743  flush_signals(current);
1744  del_timer_sync(&ep->ofld_timer);
1745 
1746  bnx2i_ep_destroy_list_del(hba, ep);
1747 
1748  if (ep->state != EP_STATE_CLEANUP_CMPL)
1749  /* should never happen */
1750  printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1751 
1752  return 0;
1753 }
1754 
1755 
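/*
 * bnx2i_ep_connect - allocates an endpoint, offloads the connection context
 * to the hardware and initiates the TCP connection to the target; returns
 * the iscsi_endpoint on success or ERR_PTR(rc) on failure.
 */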
1768 static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1769  struct sockaddr *dst_addr,
1770  int non_blocking)
1771 {
1772  u32 iscsi_cid = BNX2I_CID_RESERVED;
1773  struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1774  struct sockaddr_in6 *desti6;
1775  struct bnx2i_endpoint *bnx2i_ep;
1776  struct bnx2i_hba *hba;
1777  struct cnic_dev *cnic;
1778  struct cnic_sockaddr saddr;
1779  struct iscsi_endpoint *ep;
1780  int rc = 0;
1781 
1782  if (shost) {
1783  /* driver is given scsi host to work with */
1784  hba = iscsi_host_priv(shost);
1785  } else
1786  /*
1787  * check if the given destination can be reached through
1788  * a iscsi capable NetXtreme2 device
1789  */
1790  hba = bnx2i_check_route(dst_addr);
1791 
1792  if (!hba) {
1793  rc = -EINVAL;
1794  goto nohba;
1795  }
1796  mutex_lock(&hba->net_dev_lock);
1797 
1798  if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
1799  rc = -EPERM;
1800  goto check_busy;
1801  }
1802  cnic = hba->cnic;
1803  ep = bnx2i_alloc_ep(hba);
1804  if (!ep) {
1805  rc = -ENOMEM;
1806  goto check_busy;
1807  }
1808  bnx2i_ep = ep->dd_data;
1809 
1810  atomic_set(&bnx2i_ep->num_active_cmds, 0);
1811  iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1812  if (iscsi_cid == -1) {
1813  printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
1814  "iscsi cid\n", hba->netdev->name);
1815  rc = -ENOMEM;
1816  bnx2i_free_ep(ep);
1817  goto check_busy;
1818  }
1819  bnx2i_ep->hba_age = hba->age;
1820 
1821  rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1822  if (rc != 0) {
1823  printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
1824  "\n", hba->netdev->name);
1825  rc = -ENOMEM;
1826  goto qp_resc_err;
1827  }
1828 
1829  bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1830  bnx2i_ep->state = EP_STATE_OFLD_START;
1831  bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1832 
1833  init_timer(&bnx2i_ep->ofld_timer);
1834  bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1835  bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1836  bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1837  add_timer(&bnx2i_ep->ofld_timer);
1838 
1839  if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
1840  if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1841  printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1842  hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1843  rc = -EBUSY;
1844  } else
1845  rc = -ENOSPC;
1846  printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
1847  "\n", hba->netdev->name);
1848  bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1849  goto conn_failed;
1850  }
1851 
1852  /* Wait for CNIC hardware to setup conn context and return 'cid' */
1853  wait_event_interruptible(bnx2i_ep->ofld_wait,
1854  bnx2i_ep->state != EP_STATE_OFLD_START);
1855 
1856  if (signal_pending(current))
1857  flush_signals(current);
1858  del_timer_sync(&bnx2i_ep->ofld_timer);
1859 
1860  bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1861 
1862  if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1863  if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1864  printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1865  hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1866  rc = -EBUSY;
1867  } else
1868  rc = -ENOSPC;
1869  goto conn_failed;
1870  }
1871 
1872  rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1873  iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1874  if (rc) {
1875  rc = -EINVAL;
1876  /* Need to terminate and cleanup the connection */
1877  goto release_ep;
1878  }
1879 
1880  bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1881  bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1882  clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1883 
1884  memset(&saddr, 0, sizeof(saddr));
1885  if (dst_addr->sa_family == AF_INET) {
1886  desti = (struct sockaddr_in *) dst_addr;
1887  saddr.remote.v4 = *desti;
1888  saddr.local.v4.sin_family = desti->sin_family;
1889  } else if (dst_addr->sa_family == AF_INET6) {
1890  desti6 = (struct sockaddr_in6 *) dst_addr;
1891  saddr.remote.v6 = *desti6;
1892  saddr.local.v6.sin6_family = desti6->sin6_family;
1893  }
1894 
1895  bnx2i_ep->timestamp = jiffies;
1896  bnx2i_ep->state = EP_STATE_CONNECT_START;
1897  if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1898  rc = -EINVAL;
1899  goto conn_failed;
1900  } else
1901  rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1902  if (rc)
1903  goto release_ep;
1904 
1905  bnx2i_ep_active_list_add(hba, bnx2i_ep);
1906 
1907  if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
1908  goto del_active_ep;
1909 
1910  mutex_unlock(&hba->net_dev_lock);
1911  return ep;
1912 
1913 del_active_ep:
1914  bnx2i_ep_active_list_del(hba, bnx2i_ep);
1915 release_ep:
1916  if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1917  mutex_unlock(&hba->net_dev_lock);
1918  return ERR_PTR(rc);
1919  }
1920 conn_failed:
1921  bnx2i_free_qp_resc(hba, bnx2i_ep);
1922 qp_resc_err:
1923  bnx2i_free_ep(ep);
1924 check_busy:
1925  mutex_unlock(&hba->net_dev_lock);
1926 nohba:
1927  return ERR_PTR(rc);
1928 }
1929 
1930 
1938 static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1939 {
1940  struct bnx2i_endpoint *bnx2i_ep;
1941  int rc = 0;
1942 
1943  bnx2i_ep = ep->dd_data;
1944  if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1945  (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1946  (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1947  return -1;
1948  if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1949  return 1;
1950 
1951  rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1952  ((bnx2i_ep->state ==
1953  EP_STATE_OFLD_FAILED) ||
1954  (bnx2i_ep->state ==
1955  EP_STATE_CONNECT_FAILED) ||
1956  (bnx2i_ep->state ==
1957  EP_STATE_CONNECT_COMPL)),
1958  msecs_to_jiffies(timeout_ms));
1959  if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
1960  rc = -1;
1961 
1962  if (rc > 0)
1963  return 1;
1964  else if (!rc)
1965  return 0; /* timeout */
1966  else
1967  return rc;
1968 }
1969 
1970 
1977 static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1978 {
1979  int ret;
1980  int cnic_dev_10g = 0;
1981 
1982  if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1983  cnic_dev_10g = 1;
1984 
1985  switch (bnx2i_ep->state) {
1986  case EP_STATE_CLEANUP_FAILED:
1987  case EP_STATE_OFLD_FAILED:
1988  case EP_STATE_DISCONN_TIMEDOUT:
1989  ret = 0;
1990  break;
1991  case EP_STATE_CONNECT_START:
1992  case EP_STATE_CONNECT_COMPL:
1993  case EP_STATE_ULP_UPDATE_START:
1994  case EP_STATE_ULP_UPDATE_COMPL:
1996  case EP_STATE_TCP_FIN_RCVD:
1997  case EP_STATE_LOGOUT_SENT:
1998  case EP_STATE_LOGOUT_RESP_RCVD:
1999  case EP_STATE_ULP_UPDATE_FAILED:
2000  ret = 1;
2001  break;
2002  case EP_STATE_TCP_RST_RCVD:
2003  if (cnic_dev_10g)
2004  ret = 0;
2005  else
2006  ret = 1;
2007  break;
2008  default:
2009  ret = 0;
2010  }
2011 
2012  return ret;
2013 }
2014 
2015 
2016 /*
2017  * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
2018  * @ep: TCP connection (bnx2i endpoint) handle
2019  *
2020  * executes TCP connection teardown process
2021  */
2022 int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
2023 {
2024  struct bnx2i_hba *hba = bnx2i_ep->hba;
2025  struct cnic_dev *cnic;
2026  struct iscsi_session *session = NULL;
2027  struct iscsi_conn *conn = NULL;
2028  int ret = 0;
2029  int close = 0;
2030  int close_ret = 0;
2031 
2032  if (!hba)
2033  return 0;
2034 
2035  cnic = hba->cnic;
2036  if (!cnic)
2037  return 0;
2038 
2039  if (bnx2i_ep->state == EP_STATE_IDLE ||
2040  bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2041  return 0;
2042 
2043  if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
2044  goto destroy_conn;
2045 
2046  if (bnx2i_ep->conn) {
2047  conn = bnx2i_ep->conn->cls_conn->dd_data;
2048  session = conn->session;
2049  }
2050 
2051  init_timer(&bnx2i_ep->ofld_timer);
2052  bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
2053  bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
2054  bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
2055  add_timer(&bnx2i_ep->ofld_timer);
2056 
2057  if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
2058  goto out;
2059 
2060  if (session) {
2061  spin_lock_bh(&session->lock);
2062  if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
2063  if (session->state == ISCSI_STATE_LOGGING_OUT) {
2064  if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
2065  /* Logout sent, but no resp */
2066  printk(KERN_ALERT "bnx2i (%s): WARNING"
2067  " logout response was not "
2068  "received!\n",
2069  bnx2i_ep->hba->netdev->name);
2070  } else if (bnx2i_ep->state ==
2070  EP_STATE_LOGOUT_RESP_RCVD)
2072  close = 1;
2073  }
2074  } else
2075  close = 1;
2076 
2077  spin_unlock_bh(&session->lock);
2078  }
2079 
2080  bnx2i_ep->state = EP_STATE_DISCONN_START;
2081 
2082  if (close)
2083  close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
2084  else
2085  close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2086 
2087  if (close_ret)
2088  printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
2089  bnx2i_ep->hba->netdev->name, close, close_ret);
2090  else
2091  /* wait for option-2 conn teardown */
2092  wait_event_interruptible(bnx2i_ep->ofld_wait,
2093  bnx2i_ep->state != EP_STATE_DISCONN_START);
2094 
2095  if (signal_pending(current))
2096  flush_signals(current);
2097  del_timer_sync(&bnx2i_ep->ofld_timer);
2098 
2099 destroy_conn:
2100  bnx2i_ep_active_list_del(hba, bnx2i_ep);
2101  if (bnx2i_tear_down_conn(hba, bnx2i_ep))
2102  return -EINVAL;
2103 out:
2104  bnx2i_ep->state = EP_STATE_IDLE;
2105  return ret;
2106 }
2107 
2108 
2115 static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
2116 {
2117  struct bnx2i_endpoint *bnx2i_ep;
2118  struct bnx2i_conn *bnx2i_conn = NULL;
2119  struct iscsi_conn *conn = NULL;
2120  struct bnx2i_hba *hba;
2121 
2122  bnx2i_ep = ep->dd_data;
2123 
2124  /* driver should not attempt connection cleanup until TCP_CONNECT
2125  * completes either successfully or fails. Timeout is 9-secs, so
2126  * wait for it to complete
2127  */
2128  while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
2129  !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
2130  msleep(250);
2131 
2132  if (bnx2i_ep->conn) {
2133  bnx2i_conn = bnx2i_ep->conn;
2134  conn = bnx2i_conn->cls_conn->dd_data;
2135  iscsi_suspend_queue(conn);
2136  }
2137  hba = bnx2i_ep->hba;
2138 
2139  mutex_lock(&hba->net_dev_lock);
2140 
2141  if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2142  goto out;
2143 
2144  if (bnx2i_ep->state == EP_STATE_IDLE)
2145  goto free_resc;
2146 
2147  if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
2148  (bnx2i_ep->hba_age != hba->age)) {
2149  bnx2i_ep_active_list_del(hba, bnx2i_ep);
2150  goto free_resc;
2151  }
2152 
2153  /* Do all chip cleanup here */
2154  if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
2155  mutex_unlock(&hba->net_dev_lock);
2156  return;
2157  }
2158 free_resc:
2159  bnx2i_free_qp_resc(hba, bnx2i_ep);
2160 
2161  if (bnx2i_conn)
2162  bnx2i_conn->ep = NULL;
2163 
2164  bnx2i_free_ep(ep);
2165 out:
2166  mutex_unlock(&hba->net_dev_lock);
2167 
2168  wake_up_interruptible(&hba->eh_wait);
2169 }
2170 
2171 
2177 static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
2178 {
2179  struct bnx2i_hba *hba = iscsi_host_priv(shost);
2180  char *buf = (char *) params;
2181  u16 len = sizeof(*params);
2182 
2183  /* handled by cnic driver */
2184  hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
2185  len);
2186 
2187  return 0;
2188 }
2189 
2190 static umode_t bnx2i_attr_is_visible(int param_type, int param)
2191 {
2192  switch (param_type) {
2193  case ISCSI_HOST_PARAM:
2194  switch (param) {
2195  case ISCSI_HOST_PARAM_NETDEV_NAME:
2196  case ISCSI_HOST_PARAM_HWADDRESS:
2197  case ISCSI_HOST_PARAM_IPADDRESS:
2198  return S_IRUGO;
2199  default:
2200  return 0;
2201  }
2202  case ISCSI_PARAM:
2203  switch (param) {
2204  case ISCSI_PARAM_MAX_RECV_DLENGTH:
2205  case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2206  case ISCSI_PARAM_HDRDGST_EN:
2207  case ISCSI_PARAM_DATADGST_EN:
2208  case ISCSI_PARAM_CONN_ADDRESS:
2209  case ISCSI_PARAM_CONN_PORT:
2210  case ISCSI_PARAM_EXP_STATSN:
2211  case ISCSI_PARAM_PERSISTENT_ADDRESS:
2212  case ISCSI_PARAM_PERSISTENT_PORT:
2213  case ISCSI_PARAM_PING_TMO:
2214  case ISCSI_PARAM_RECV_TMO:
2215  case ISCSI_PARAM_INITIAL_R2T_EN:
2216  case ISCSI_PARAM_MAX_R2T:
2217  case ISCSI_PARAM_IMM_DATA_EN:
2218  case ISCSI_PARAM_FIRST_BURST:
2219  case ISCSI_PARAM_MAX_BURST:
2220  case ISCSI_PARAM_PDU_INORDER_EN:
2221  case ISCSI_PARAM_DATASEQ_INORDER_EN:
2222  case ISCSI_PARAM_ERL:
2223  case ISCSI_PARAM_TARGET_NAME:
2224  case ISCSI_PARAM_TPGT:
2225  case ISCSI_PARAM_USERNAME:
2226  case ISCSI_PARAM_PASSWORD:
2227  case ISCSI_PARAM_USERNAME_IN:
2228  case ISCSI_PARAM_PASSWORD_IN:
2229  case ISCSI_PARAM_FAST_ABORT:
2230  case ISCSI_PARAM_ABORT_TMO:
2231  case ISCSI_PARAM_LU_RESET_TMO:
2232  case ISCSI_PARAM_TGT_RESET_TMO:
2233  case ISCSI_PARAM_IFACE_NAME:
2234  case ISCSI_PARAM_INITIATOR_NAME:
2235  return S_IRUGO;
2236  default:
2237  return 0;
2238  }
2239  }
2240 
2241  return 0;
2242 }
2243 
2244 /*
2245  * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template
2246  * used while registering with the scsi host and iSCSI transport module.
2247  */
2248 static struct scsi_host_template bnx2i_host_template = {
2249  .module = THIS_MODULE,
2250  .name = "Broadcom Offload iSCSI Initiator",
2251  .proc_name = "bnx2i",
2252  .queuecommand = iscsi_queuecommand,
2253  .eh_abort_handler = iscsi_eh_abort,
2254  .eh_device_reset_handler = iscsi_eh_device_reset,
2255  .eh_target_reset_handler = iscsi_eh_recover_target,
2256  .change_queue_depth = iscsi_change_queue_depth,
2257  .target_alloc = iscsi_target_alloc,
2258  .can_queue = 2048,
2259  .max_sectors = 127,
2260  .cmd_per_lun = 128,
2261  .this_id = -1,
2262  .use_clustering = ENABLE_CLUSTERING,
2263  .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2264  .shost_attrs = bnx2i_dev_attributes,
2265 };
2266 
2267 struct iscsi_transport bnx2i_iscsi_transport = {
2268  .owner = THIS_MODULE,
2269  .name = "bnx2i",
2270  .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
2271  CAP_MULTI_R2T | CAP_DATADGST |
2272  CAP_DATA_PATH_OFFLOAD |
2273  CAP_TEXT_NEGO,
2274  .create_session = bnx2i_session_create,
2275  .destroy_session = bnx2i_session_destroy,
2276  .create_conn = bnx2i_conn_create,
2277  .bind_conn = bnx2i_conn_bind,
2278  .destroy_conn = bnx2i_conn_destroy,
2279  .attr_is_visible = bnx2i_attr_is_visible,
2280  .set_param = iscsi_set_param,
2281  .get_conn_param = iscsi_conn_get_param,
2282  .get_session_param = iscsi_session_get_param,
2283  .get_host_param = bnx2i_host_get_param,
2284  .start_conn = bnx2i_conn_start,
2285  .stop_conn = iscsi_conn_stop,
2286  .send_pdu = iscsi_conn_send_pdu,
2287  .xmit_task = bnx2i_task_xmit,
2288  .get_stats = bnx2i_conn_get_stats,
2289  /* TCP connect - disconnect - option-2 interface calls */
2290  .get_ep_param = bnx2i_ep_get_param,
2291  .ep_connect = bnx2i_ep_connect,
2292  .ep_poll = bnx2i_ep_poll,
2293  .ep_disconnect = bnx2i_ep_disconnect,
2294  .set_path = bnx2i_nl_set_path,
2295  /* Error recovery timeout call */
2296  .session_recovery_timedout = iscsi_session_recovery_timedout,
2297  .cleanup_task = bnx2i_cleanup_task,
2298 };
2299