#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif

#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"

#define DRV_MODULE_NAME		"cnic"
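
/* cnic_ulp_tbl holds the registered ops for each upper-layer protocol
 * (iSCSI, FCoE, L4, ...).  Readers below use rcu_dereference_protected()
 * because all writers serialize on cnic_lock.
 */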
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
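
/* Opening the UIO device quiesces and re-initializes the L2 rings so the
 * userspace client starts from a clean producer/consumer state.
 */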
	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

		if (netdev == cdev->netdev) {

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)

	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	info.data.ulp_type = ulp_type;
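
/* cnic_send_nlmsg() below fills an iscsi_path request (IP address length,
 * VLAN, path MTU) for the socket and hands it to the iSCSI ULP's netlink
 * handler so userspace can resolve the path.
 */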
static int cnic_in_use(struct cnic_sock *csk)

		if (cp->ctx_tbl[i].cid == cid) {

	if (!udev || udev->uio_dev == -1)

	len = sizeof(path_req);
	buf = (char *) &path_req;
	memset(&path_req, 0, len);

		path_req.ip_addr_len = 16;
		path_req.ip_addr_len = 4;
	path_req.vlan_id = csk->vlan_id;
	path_req.pmtu = csk->mtu;
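
/* cnic_iscsi_nl_msg_recv() handles the ISCSI_UEVENT_PATH_UPDATE reply:
 * after validating the buffer it copies the resolved MAC address into
 * csk->ha and kicks the offload state machine.
 */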
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)

		if (len < sizeof(*path_resp))

		if (cnic_in_use(csk) &&

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_upcall(cp, csk,

static int cnic_offld_prep(struct cnic_sock *csk)

static int cnic_close_prep(struct cnic_sock *csk)

static int cnic_abort_prep(struct cnic_sock *csk)

		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);

		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		pr_err("%s: Type %d still has devices registered\n",
		       __func__, ulp_type);
	pr_warn("%s: Failed waiting for ref count to go to zero\n",
		__func__);

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)

		pr_err("%s: Bad type %d\n", __func__, ulp_type);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);

	ulp_ops = cnic_ulp_tbl_prot(ulp_type);

	cnic_ulp_ctl(dev, ulp_type, true);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)

		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		netdev_warn(dev->netdev,
			    "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);
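
/* Simple circular ID allocator backing the iSCSI/FCoE connection tables:
 * cnic_alloc_new_id() scans the bitmap from ->next so IDs are handed out
 * round-robin, and freed IDs become reusable on the next wrap.
 */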
	id_tbl->start = start_id;

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)

	if (id >= id_tbl->max)

	spin_lock(&id_tbl->lock);
	spin_unlock(&id_tbl->lock);

	spin_lock(&id_tbl->lock);
	if (id >= id_tbl->max) {
		if (id_tbl->next != 0) {
			if (id >= id_tbl->next)
	id_tbl->next = (id + 1) & (id_tbl->max - 1);
	spin_unlock(&id_tbl->lock);

	if (id >= id_tbl->max)

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));

	for (i = 0; i < pages; i++) {

	cnic_free_dma(dev, dma);

static void cnic_free_context(struct cnic_dev *dev)

	for (i = 0; i < cp->ctx_blks; i++) {

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)

	__cnic_free_uio_rings(udev);

	list_del_init(&udev->list);

	__cnic_free_uio(udev);

static void cnic_free_resc(struct cnic_dev *dev)

		__cnic_free_uio_rings(udev);

	cnic_free_context(dev);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	cnic_free_id_tbl(&cp->cid_tbl);

static int cnic_alloc_context(struct cnic_dev *dev)

		for (i = 0; i < 2; i++) {
			reg = cnic_reg_rd_ind(dev, off);
		for (i = 0; i < cp->ctx_blks; i++) {

static u16 cnic_bnx2x_hw_idx(u16 idx)

	int err, i, use_page_tbl = 0;

	kcq = (struct kcqe **) info->dma.pg_arr;

		info->next_idx = cnic_bnx2_next_idx;
		info->hw_idx = cnic_bnx2_hw_idx;

		info->next_idx = cnic_bnx2x_next_idx;
		info->hw_idx = cnic_bnx2x_hw_idx;

		if (j >= KCQ_PAGE_CNT)
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
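
/* The UIO rings back the L2 path shared with userspace: udev->l2_ring
 * holds the BD rings and udev->l2_buf the packet buffers, both exported
 * to the UIO client as memory-mapped regions.
 */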
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)

	__cnic_free_uio_rings(udev);

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)

			if (__cnic_alloc_uio_rings(udev, pages)) {

	if (__cnic_alloc_uio_rings(udev, pages))

	list_add(&udev->list, &cnic_udev_list);

static int cnic_init_uio(struct cnic_dev *dev)

		uinfo->name = "bnx2_cnic";
		uinfo->name = "bnx2x_cnic";

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	cnic_init_rings(dev);
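
/* Per-chip resource setup: KCQ(s), context memory and UIO rings are
 * allocated in order, and any failure unwinds through cnic_free_resc().
 */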
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	ret = cnic_alloc_context(dev);
	ret = cnic_alloc_uio_rings(dev, 2);
	ret = cnic_init_uio(dev);

	cnic_free_resc(dev);

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)

	int total_mem, blks, i;

	if (total_mem % ctx_blk_size)
	if (blks > cp->ethdev->ctx_tbl_len)

	for (i = 0; i < blks; i++) {

		cnic_free_context(dev);

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);

		if ((i % n) == (n - 1))

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);

	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);

	ret = cnic_alloc_bnx2x_context(dev);

	ret = cnic_alloc_uio_rings(dev, 4);

	ret = cnic_init_uio(dev);

	cnic_free_resc(dev);

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)

	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (num_wqes > cnic_kwq_avail(cp) &&

	for (i = 0; i < num_wqes; i++) {
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.reserved1 = 0;

	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)

		  req1->num_tasks_per_conn);
		  req1->rq_buffer_size);
		  req1->num_tasks_per_conn);
		  req1->num_tasks_per_conn);
		  req1->num_tasks_per_conn);
		  req1->num_tasks_per_conn);

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)

	struct kcqe *cqes[1];

	kcqe.completion_status =

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)

	cnic_free_dma(dev, &iscsi->hq_info);

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)

	cid = cnic_alloc_new_id(&cp->cid_tbl);

	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);

	cnic_free_bnx2x_conn_resc(dev, l5_cid);
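
/* bnx2x context memory is carved into fixed-size blocks; for a given CID,
 * cnic_get_bnx2x_ctx() returns the kernel virtual address and fills the
 * regpair with the bus address of that connection's context.
 */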
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)

	unsigned long align_off = 0;

	if (cp->ctx_arr[blk].mapping & mask)

	ctx_map = cp->ctx_arr[blk].mapping + align_off +

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)

	int i, j, n = 2, n_max;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);

		iscsi->hq_info.pgtbl_map & 0xffffffff;
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {

		iscsi->hq_info.pgtbl_map & 0xffffffff;

	for (i = 0; i < cp->num_cqs; i++) {

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)

	struct kcqe *cqes[1];

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)

	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status =

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);

	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);

	memcpy(data, kwqe, sizeof(struct kwqe));

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)

	memset(&l5_data, 0, sizeof(l5_data));

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)

	u32 l5_cid = req->reserved0;
	struct kcqe *cqes[1];

		goto skip_cfc_delete;

		if (delta > (2 * HZ))

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	kcqe.iscsi_conn_id = l5_cid;

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)

	for (i = 0; i < 4; i++, addrp++)
	for (i = 0; i < 4; i++, addrp++)

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->mss = 0xffff;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)

	u16 tstorm_flags = 0;

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)

		netdev_err(dev->netdev, "conn_buf size too big\n");

	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;

	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)

	memset(&l5_data, 0, sizeof(l5_data));

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)

	memset(&l5_data, 0, sizeof(l5_data));

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)

	struct kcqe *cqes[1];

	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)

	struct kcqe *cqes[1];

	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)

		netdev_err(dev->netdev, "fcoe_init size too big\n");

	memset(fcoe_init, 0, sizeof(*fcoe_init));

	cp->kcq2.sw_prod_idx = 0;

static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)

	u32 cid = -1, l5_cid;

	struct kcqe *cqes[1];

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);

		netdev_err(dev->netdev, "fcoe_offload size too big\n");

	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));

	cnic_free_bnx2x_conn_resc(dev, l5_cid);

static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)

		netdev_err(dev->netdev, "fcoe_enable size too big\n");

	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)

		netdev_err(dev->netdev, "fcoe_disable size too big\n");

	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));

static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)

	struct kcqe *cqes[1];

	memset(&l5_data, 0, sizeof(l5_data));

	kcqe.completion_status = 0;
	kcqe.fcoe_conn_context_id = cid;

static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)

		for (j = 0; j < 5; j++) {

			netdev_warn(dev->netdev, "CID %x not deleted\n",

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	memset(&l5_data, 0, sizeof(l5_data));

static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)

	struct kcqe *cqes[1];

	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
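
/* KWQE submission from the ULPs: the dispatch loops below route each
 * opcode to its handler and, on failure, generate a canned error KCQE
 * through cnic_bnx2x_kwqe_err() so the ULP still sees a completion.
 */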
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)

	for (i = 0; i < num_wqes; ) {

			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			ret = cnic_bnx2x_close(dev, kwqe);
			ret = cnic_bnx2x_reset(dev, kwqe);
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			ret = cnic_bnx2x_update_pg(dev, kwqe);

			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);

			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			cnic_bnx2x_kwqe_err(dev, kwqe);

static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)

	for (i = 0; i < num_wqes; ) {

			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);

			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);

			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			cnic_bnx2x_kwqe_err(dev, kwqe);

static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)

	switch (layer_code) {
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);

static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)

static void service_kcqes(struct cnic_dev *dev, int num_cqes)

		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		while (j < num_cqes) {

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))

			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",

	int kcqe_cnt = 0, last_cnt = 0;

	hw_prod = info->hw_idx(hw_prod);

		last_cnt = kcqe_cnt;
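
/* L2 ring servicing: completions are reaped by comparing the hardware
 * Tx/Rx consumer indices with our shadow copies, and the UIO client is
 * notified when either ring has moved.
 */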
static int cnic_l2_completion(struct cnic_local *cp)

	u16 hw_cons, sw_cons;
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));

	while (sw_cons != hw_cons) {

static void cnic_chk_pkt_rings(struct cnic_local *cp)

	u16 rx_cons, tx_cons;

		comp = cnic_l2_completion(cp);

static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)

	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		status_idx = (u16) *cp->kcq1.status_idx_ptr;

	cnic_chk_pkt_rings(cp);

static int cnic_service_bnx2(void *data, void *status_blk)

		return sblk->status_idx;

	return cnic_service_bnx2_queues(dev);

static void cnic_service_bnx2_msix(unsigned long data)

static void cnic_doirq(struct cnic_dev *dev)

	struct cnic_dev *dev = dev_instance;

static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	cmd_data.sb_id_and_flags =

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)

static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)

	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

static void cnic_service_bnx2x_bh(unsigned long data)

	u32 status_idx, new_status_idx;

	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		       cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)

static int cnic_service_bnx2x(void *data, void *status_blk)

	cnic_chk_pkt_rings(cp);

static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)

	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));

static void cnic_ulp_stop(struct cnic_dev *dev)

		cnic_ulp_stop_one(cp, if_type);

static void cnic_ulp_start(struct cnic_dev *dev)

		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)

	ulp_ops = cnic_ulp_tbl_prot(ulp_type);

	switch (info->cmd) {

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {

				   "CID %x CFC delete comp error %x\n",

		cnic_copy_ulp_stats(dev, ulp_type);

static void cnic_ulp_init(struct cnic_dev *dev)

		ulp_ops = cnic_ulp_tbl_prot(i);

static void cnic_ulp_exit(struct cnic_dev *dev)

		ulp_ops = cnic_ulp_tbl_prot(i);

static int cnic_cm_offload_pg(struct cnic_sock *csk)

	struct kwqe *wqes[1];

	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;

static int cnic_cm_update_pg(struct cnic_sock *csk)

	struct kwqe *wqes[1];

	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

static int cnic_cm_upload_pg(struct cnic_sock *csk)

	struct kwqe *wqes[1];

	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

static int cnic_cm_conn_req(struct cnic_sock *csk)

	struct kwqe *wqes[3];

	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;

	l4kwqe3->pmtu = csk->mtu;

	wqes[0] = (struct kwqe *) l4kwqe1;
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;

			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |

		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);

		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);

		L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;

		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;
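
/* Close and abort requests are single KWQEs; only the opcode and the
 * layer code placed in the flags field differ between the two.
 */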
static int cnic_cm_close_req(struct cnic_sock *csk)

	struct kwqe *wqes[1];

	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;

static int cnic_cm_abort_req(struct cnic_sock *csk)

	struct kwqe *wqes[1];

	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)

static void cnic_cm_cleanup(struct cnic_sock *csk)

static void cnic_close_conn(struct cnic_sock *csk)

		cnic_cm_upload_pg(csk);

	cnic_cm_cleanup(csk);

static int cnic_cm_destroy(struct cnic_sock *csk)

	if (!cnic_in_use(csk))

	cnic_cm_cleanup(csk);

#if defined(CONFIG_INET)

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))

	memset(&fl6, 0, sizeof(fl6));

	if ((*dst)->error) {

		err = cnic_get_v4_route(dst_addr, &dst);

		err = cnic_get_v6_route(dst_addr6, &dst);

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

		cnic_get_v6_route(&saddr->remote.v6, &dst);

		local_port = saddr->local.v6.sin6_port;

		cnic_get_v4_route(&saddr->remote.v4, &dst);

		local_port = saddr->local.v4.sin_port;

	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {

			csk->mtu = dst_mtu(dst);

	if (port_id == -1) {

static void cnic_init_csk_state(struct cnic_sock *csk)

	if (!cnic_in_use(csk))

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);

	err = cnic_resolve_addr(csk, saddr);

static int cnic_cm_abort(struct cnic_sock *csk)

	if (!cnic_in_use(csk))

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	if (csk->state != opcode)

static int cnic_cm_close(struct cnic_sock *csk)

	if (!cnic_in_use(csk))

	if (cnic_close_prep(csk)) {
		return cnic_cm_close_req(csk);

static int cnic_cm_set_pg(struct cnic_sock *csk)

	if (cnic_offld_prep(csk)) {
			cnic_cm_update_pg(csk);
			cnic_cm_offload_pg(csk);

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)

	u8 opcode = kcqe->op_code;

	if (!cnic_in_use(csk))

		cnic_cm_upcall(cp, csk,

		cnic_cm_conn_req(csk);

static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)

	u8 opcode = l4kcqe->op_code;

		cnic_process_fcoe_term_conn(dev, kcqe);

		cnic_cm_process_offld_pg(dev, l4kcqe);

	l5_cid = l4kcqe->cid;

	if (!cnic_in_use(csk)) {

		if (l4kcqe->status != 0) {

			cnic_cm_upcall(cp, csk,

		if (l4kcqe->status == 0)
		else if (l4kcqe->status ==

		cnic_cm_upcall(cp, csk, opcode);

		if (l4kcqe->status != 0) {
			netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
				    "status 0x%x\n", l4kcqe->status);

		cnic_cm_upcall(cp, csk, opcode);
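
/* Fan-out point for L4 KCQEs: the indicate_kcqes callback walks the batch
 * and feeds each event to cnic_cm_process_kcqe() above.
 */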
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);

	.indicate_kcqes	= cnic_cm_indicate_kcqe,

static void cnic_cm_free_mem(struct cnic_dev *dev)

static int cnic_cm_alloc_mem(struct cnic_dev *dev)

	cnic_cm_free_mem(dev);

static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)

	if (opcode == csk->state || csk->state == 0 ||

	if (csk->state == 0)

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)

		cnic_cm_upcall(cp, csk, opcode);

	cnic_close_conn(csk);
	cnic_cm_upcall(cp, csk, opcode);

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)

	cnic_ctx_wr(dev, 45, 0, seed);

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)

	int close_complete = 0;

	if (cnic_ready_to_close(csk, opcode)) {

		memset(&l5_data, 0, sizeof(l5_data));

	} else if (close_complete) {
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)

	if (!netif_running(dev->netdev))

	cnic_bnx2x_delete_wait(dev, 0);

		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

static void cnic_delete_task(struct work_struct *work)

	int need_resched = 0;

		err = cnic_bnx2x_destroy_ramrod(dev, i);

			cnic_free_bnx2x_conn_resc(dev, i);

static int cnic_cm_open(struct cnic_dev *dev)

	err = cnic_cm_alloc_mem(dev);

	cnic_cm_free_mem(dev);

static int cnic_cm_shutdown(struct cnic_dev *dev)

		cnic_cm_cleanup(csk);

	cnic_cm_free_mem(dev);

static void cnic_init_context(struct cnic_dev *dev, u32 cid)

		cnic_ctx_wr(dev, cid_addr, i, 0);

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)

	for (i = 0; i < cp->ctx_blks; i++) {

			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);

		for (j = 0; j < 10; j++) {
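
/* IRQ plumbing: a dedicated MSI-X vector is requested here and serviced
 * by a tasklet; the KCQ producer index is polled back to zero before the
 * vector is trusted.
 */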
static void cnic_free_irq(struct cnic_dev *dev)

static int cnic_request_irq(struct cnic_dev *dev)

static int cnic_init_bnx2_irq(struct cnic_dev *dev)

			     (unsigned long) dev);

		err = cnic_request_irq(dev);

			1 << (11 + sblk_num));

		while (sblk->status_completion_producer_index && i < 10) {

		if (sblk->status_completion_producer_index)

			netdev_err(dev->netdev, "KCQ index not resetting to 0\n");

static void cnic_enable_bnx2_int(struct cnic_dev *dev)

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)

static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)

	u32 cid_addr, tx_cid, sb_id;

		cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;

		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;

			cnic_ctx_wr(dev, cid_addr2, i, 0);

	cnic_init_context(dev, tx_cid);
	cnic_init_context(dev, tx_cid + 1);

	cnic_ctx_wr(dev, cid_addr, offset0, val);

	cnic_ctx_wr(dev, cid_addr, offset1, val);

	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);

static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)

	u32 cid_addr, sb_id, val, coal_reg, coal_val;

		cnic_init_context(dev, 2);
		cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
		coal_val = CNIC_RD(dev, coal_reg);

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_val = 1 << (11 + sb_id);

	CNIC_WR(dev, coal_reg, coal_val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)

	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));

static void cnic_set_bnx2_mac(struct cnic_dev *dev)

	val = cp->func << 2;

static int cnic_start_bnx2_hw(struct cnic_dev *dev)

	u32 val, kcq_cid_addr, kwq_cid_addr;

	cnic_set_bnx2_mac(dev);

	val |= (12 - 8) << 4;

	err = cnic_setup_5709_context(dev, 1);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);

	val = (u32) cp->kcq1.dma.pgtbl_map;

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
		netdev_err(dev->netdev, "cnic_init_irq failed\n");

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)

	for (i = 0; i < cp->ctx_blks; i++) {

			map = (map + mask) & ~mask;

		cnic_ctx_tbl_wr(dev, start_offset + i, map);

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)

		     (unsigned long) dev);

	err = cnic_request_irq(dev);

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)

static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)

	u32 cli = cp->ethdev->iscsi_l2_client_id;

	memset(txbd, 0, BCM_PAGE_SIZE);

			&((txbd + 1)->parse_bd_e1x);
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

	val = (u64) ring_map >> 32;

	val = (u64) ring_map & 0xffffffff;

	data->general.statistics_zero_flg = 1;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;

static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)

					(udev->l2_ring + (2 * BCM_PAGE_SIZE));

	u32 cli = cp->ethdev->iscsi_l2_client_id;

	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;

	data->rx.client_qzone_id = cl_qzone_id;

	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)

	cp->kcq1.sw_prod_idx = 0;

		cp->kcq1.hw_prod_idx_ptr =
		cp->kcq1.status_idx_ptr =

		cp->kcq1.hw_prod_idx_ptr =
		cp->kcq1.status_idx_ptr =

		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
		cp->kcq2.status_idx_ptr =

static int cnic_start_bnx2x_hw(struct cnic_dev *dev)

		val = (val >> 1) & 1;

		cp->pfid = func >> 1;

		cp->pfid = func & 0x6;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,

	cnic_init_bnx2x_kcq(dev);

		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
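
/* Ring bring-up/teardown for both chip families; on bnx2x this posts the
 * iSCSI CLIENT_SETUP and CLIENT_HALT ramrods and waits for completion.
 */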
static void cnic_init_rings(struct cnic_dev *dev)

		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);

		u32 cli = cp->ethdev->iscsi_l2_client_id;

		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;

			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

				    "iSCSI CLIENT_SETUP did not complete\n");

		cnic_ring_ctl(dev, cid, cli, 1);

static void cnic_shutdown_rings(struct cnic_dev *dev)

		cnic_shutdown_bnx2_rx_ring(dev);

		u32 cli = cp->ethdev->iscsi_l2_client_id;

		cnic_ring_ctl(dev, cid, cli, 0);

				    "iSCSI CLIENT_HALT did not complete\n");

		memset(&l5_data, 0, sizeof(l5_data));

		memset(rx_ring, 0, BCM_PAGE_SIZE);

static int cnic_register_netdev(struct cnic_dev *dev)

		netdev_err(dev->netdev, "register_cnic failed\n");

static void cnic_unregister_netdev(struct cnic_dev *dev)

static int cnic_start_hw(struct cnic_dev *dev)

		netdev_err(dev->netdev, "allocate resource failure\n");

	err = cnic_cm_open(dev);

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)

	cnic_disable_bnx2_int_sync(dev);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);

	cnic_free_resc(dev);

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)

	*cp->kcq1.hw_prod_idx_ptr = 0;

	cnic_free_resc(dev);

static void cnic_stop_hw(struct cnic_dev *dev)

		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {

		cnic_shutdown_rings(dev);

		cnic_cm_shutdown(dev);

static void cnic_free_dev(struct cnic_dev *dev)

		netdev_err(dev->netdev,
			   "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");

		netdev_err(dev, "allocate dev struct failure\n");

	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;

	netdev_info(dev, "Added CNIC device\n");

	ethdev = (*probe)(dev);

	pdev = ethdev->pdev;

	cdev = cnic_alloc_dev(dev, pdev);

	cp->stop_hw = cnic_stop_bnx2_hw;

	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;

	ethdev = (*probe)(dev);

	pdev = ethdev->pdev;

	cdev = cnic_alloc_dev(dev, pdev);

	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;

	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;

		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;

		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;

	memset(&drvinfo, 0, sizeof(drvinfo));

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);

		list_add(&cdev->list, &cnic_dev_list);
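
/* Netdevice notifier: NETDEV_UP/DOWN events on the backing bnx2/bnx2x
 * interface start or stop the CNIC instance and its registered ULPs;
 * VLAN device events are forwarded to the real device's instance.
 */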
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)

static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)

	dev = cnic_from_netdev(netdev);

		dev = is_cnic_dev(netdev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);

		cnic_rcv_netevent(cp, event, 0);

			cnic_unregister_netdev(dev);

			list_del_init(&dev->list);

		vid = cnic_get_vlan(netdev, &realdev);

		dev = cnic_from_netdev(realdev);

			cnic_rcv_netevent(dev->cnic_priv, event, vid);

	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)

	while (!list_empty(&cnic_dev_list)) {

		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);

	while (!list_empty(&cnic_udev_list)) {

		cnic_free_uio(udev);

static int __init cnic_init(void)

static void __exit cnic_exit(void)