static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe);
	struct kwqe *kwqe_arr[2];

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
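	/*
	 * Recurring pattern in this file: one or more kernel work queue
	 * entries (KWQEs) are built on the stack, pointers to them are
	 * collected in kwqe_arr[], and the whole batch is handed to the
	 * cnic driver via hba->cnic->submit_kwqes(), guarding against a
	 * missing callback.
	 */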
	struct kwqe *kwqe_arr[3];

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	struct kwqe *kwqe_arr[2];

	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	struct kwqe *kwqe_arr[4];

	ofld_req1.hdr.flags =
	ofld_req2.hdr.flags =
	ofld_req3.hdr.flags =

	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
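	/*
	 * The 24-bit Fibre Channel source and destination IDs are packed
	 * into the request one byte at a time, least significant byte of
	 * port_id first, since the firmware structure carries s_id/d_id
	 * as byte arrays rather than 32-bit fields.
	 */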
	ofld_req3.flags |= 1 <<

	ofld_req4.hdr.flags =

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					  struct bnx2fc_rport *tgt)
	struct kwqe *kwqe_arr[2];

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
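	/*
	 * The destination MAC from the FCoE controller (ctlr->dest_addr)
	 * is copied into the enable request in reverse byte order, split
	 * across the _lo/_mid/_hi halves used by the firmware structure.
	 */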
	if (port_id != tgt->sid) {
			"sid = 0x%x\n", port_id, tgt->sid);

	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
	enbl_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	struct kwqe *kwqe_arr[2];

	memset(&disable_req, 0x00,
	disable_req.hdr.flags =

	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	struct kwqe *kwqe_arr[2];

	destroy_req.hdr.flags =

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	if (blport->lport == lport) {

	lport = unsol_els->lport;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))

				      u32 frame_len, u16 l2_oxid)

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
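	/*
	 * GFP_ATOMIC is used here, presumably because this completion
	 * path cannot sleep; the allocated unsol_els item appears to be
	 * deferred to the bnx2fc_unsol_els_work handler excerpted above.
	 */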
	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",

	fp = fc_frame_alloc(lport, payload_len);

	memcpy(fh, buf, frame_len);

	op = fc_frame_payload_op(fp);
static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	u64 err_warn_bit_map;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	for (i = 0; i < num_rq; i++) {
		rq_data = (unsigned char *)
		memcpy(buf1, rq_data, len);

	xid = err_entry->fc_hdr.ox_id;
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

	task = &(task_page[index]);

			"progress.. ignore unsol err\n");
	err_warn_bit_map = (u64)
		((u64)err_entry->data.err_warn_bitmap_hi << 32) |
		(u64)err_entry->data.err_warn_bitmap_lo;
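	/*
	 * The firmware reports the error/warning bitmap as two 32-bit
	 * halves; they are stitched back into one 64-bit value so that
	 * individual error bits can be tested with (1 << i) below.
	 */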
	if (err_warn_bit_map & (u64)((u64)1 << i)) {

			"in ABTS processing\n", xid);
			"failed xid = 0x%x. issue cleanup\n",

			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

	err_warn_bit_map = (u64)
		((u64)err_entry->data.err_warn_bitmap_hi << 32) |
		(u64)err_entry->data.err_warn_bitmap_lo;

	if (err_warn_bit_map & (u64) (1 << i)) {

	task = &(task_page[index]);
	task = &(task_page[index]);

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &

	msg = *((u32 *)rx_db);

	INIT_LIST_HEAD(&work->list);

	u32 num_free_sqes = 0;

	spin_unlock_bh(&tgt->cq_lock);

	bnx2fc_process_unsol_compl(tgt, wqe);

	fps = &per_cpu(bnx2fc_percpu, cpu);
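	/*
	 * CQ completions are not all handled inline: a work item is set
	 * up above (INIT_LIST_HEAD(&work->list)) and appears to be queued
	 * to the per-CPU bnx2fc_percpu context selected here, so the
	 * heavier completion processing runs in per-CPU thread context.
	 */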
	spin_unlock_bh(&tgt->cq_lock);
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe)

static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe)

	interface = tgt->port->priv;
	if (hba != interface->hba) {

	rc = bnx2fc_send_session_enable_req(port, tgt);

static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe)

	interface = tgt->port->priv;
	if (hba != interface->hba) {

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					      struct fcoe_kcqe *disable_kcqe)

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					      struct fcoe_kcqe *destroy_kcqe)
	while (i < num_cqe) {
		bnx2fc_fastpath_notification(hba, kcqe);
		bnx2fc_process_ofld_cmpl(hba, kcqe);
		bnx2fc_process_enable_conn_cmpl(hba, kcqe);
		bnx2fc_init_failure(hba,
		bnx2fc_process_conn_disable_cmpl(hba, kcqe);
		bnx2fc_process_conn_destroy_cmpl(hba, kcqe);

	msg = *((u32 *)sq_db);
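	/*
	 * Doorbell ring: the SQ doorbell structure is re-read as a raw
	 * 32-bit value (msg), presumably so it can be written to the
	 * adapter's doorbell window in a single 32-bit store.
	 */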
	u16 orig_xid = orig_io_req->xid;
	u32 orig_offset = offset;
	int orig_task_idx, index;
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)

	task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
	task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
			(u32)((u64)phys_addr >> 32);
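	/*
	 * phys_addr is a 64-bit DMA address split into two 32-bit halves:
	 * the (truncated) .lo assignment above takes the low word, and the
	 * shift here extracts the upper word, since the hardware SGE
	 * stores addresses as two 32-bit fields.
	 */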
	task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
	task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
	task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;

		     interface->hba->task_ctx[orig_task_idx];
	orig_task = &(task_page[index]);

	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
	sgl->mul_sgl.sgl_size = bd_count;

	task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
	task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",

	task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
	task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
	task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;

	task->txwr_rxrd.const_ctx.init_flags = task_type <<
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
	task->rxwr_txrd.const_ctx.init_flags = context_id <<

	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));

	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	sgl->mul_sgl.cur_sge_addr.hi =
	u64 tmp_fcp_cmnd[4];

	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
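	/*
	 * For the single-buffer disk case tested just below, the buffer
	 * address and remaining length can apparently be written straight
	 * into the cached SGE fields instead of building a multi-entry
	 * scatter-gather list; the mul_sgl path covers everything else.
	 */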
	if ((dev_type == TYPE_DISK) && (bd_count == 1)) {

		task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
		task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
		task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
		task->txwr_rxrd.const_ctx.init_flags |= 1 <<

		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =

	task->txwr_rxrd.const_ctx.init_flags |= task_type <<

	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

			task->txwr_rxrd.union_ctx.fcp_cmd.opaque;

	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
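	/*
	 * The FCP_CMND payload is copied into the task context as
	 * sizeof(struct fcp_cmnd) / sizeof(u64) 64-bit words in the loop
	 * below, writing through the fcp_cmd.opaque area referenced above
	 * rather than byte-by-byte.
	 */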
	for (i = 0; i < cnt; i++) {

	task->rxwr_txrd.const_ctx.init_flags = context_id <<
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<

	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

	if (bd_count == 1) {
		task->txwr_rxrd.const_ctx.init_flags |= 1 <<
	} else if (bd_count == 2) {
		task->txwr_rxrd.const_ctx.init_flags |= 1 <<

		sgl->mul_sgl.cur_sge_addr.hi =
		sgl->mul_sgl.sgl_size = bd_count;

		sgl->mul_sgl.cur_sge_addr.hi =
		sgl->mul_sgl.sgl_size = bd_count;
static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
	int hash_table_size;

	for (i = 0; i < segment_count; ++i) {

static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
	int hash_table_size;
	int segment_array_size;
	int dma_segment_array_size;

	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
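	/*
	 * The connection hash table appears to be built from segment_count
	 * chunks of BNX2FC_HASH_TBL_CHUNK_SIZE bytes rather than one large
	 * allocation; dma_segment_array records the DMA address of each
	 * chunk so the chunks can be freed later, or unwound if a later
	 * allocation in the loop below fails.
	 */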
	if (!dma_segment_array) {

	for (i = 0; i < segment_count; ++i) {
				BNX2FC_HASH_TBL_CHUNK_SIZE,
				&dma_segment_array[i],

				BNX2FC_HASH_TBL_CHUNK_SIZE,
				dma_segment_array[i]);
			kfree(dma_segment_array);

			BNX2FC_HASH_TBL_CHUNK_SIZE);

	kfree(dma_segment_array);

	for (i = 0; i < segment_count; ++i) {

	while (*pbl && *(pbl + 1)) {

	kfree(dma_segment_array);

	if (bnx2fc_allocate_hash_table(hba))

	bnx2fc_free_hash_table(hba);