28 #include <linux/sched.h>
95 static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
100 static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
108 ((u8 *) dev->mq.cq.va +
116 static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
128 static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
134 static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
136 return (void *)((u8 *) dev->mq.sq.va +
183 static int ocrdma_get_mbx_errno(u32 status)
191 switch (mbox_status) {
240 switch (add_status) {
255 switch (cqe_status) {
274 bool solicited, u16 cqe_popped)
289 static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
298 static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
299 bool arm, bool clear_int, u16 num_eqe)
329 mqe->hdr.spcl_sge_cnt_emb |=
344 static int ocrdma_alloc_q(struct ocrdma_dev *dev,
350 q->size = len * entry_size;
359 static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
364 for (i = 0; i < cnt; i++) {
365 q_pa[i].lo = (u32) (host_pa & 0xffffffff);
367 host_pa += hw_page_size;
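/*
 * Illustrative sketch (not the driver's code) of the page-list construction
 * pattern seen in ocrdma_build_q_pages() above: a contiguous DMA region is
 * walked in hardware-page steps and each bus address is recorded as a lo/hi
 * pair. The struct name and the presence of a "hi" word are assumptions here.
 */
#include <stdint.h>

struct pa_entry {
	uint32_t lo;	/* low 32 bits of the page bus address */
	uint32_t hi;	/* high 32 bits of the page bus address (assumed field) */
};

static void build_q_pages_sketch(struct pa_entry *q_pa, int cnt,
				 uint64_t host_pa, uint32_t hw_page_size)
{
	int i;

	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (uint32_t)(host_pa & 0xffffffff);
		q_pa[i].hi = (uint32_t)(host_pa >> 32);
		host_pa += hw_page_size;
	}
}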
371 static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
376 dev->nic_info.msix.start_vector += 1;
379 static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
384 dev->nic_info.msix.start_vector -= 1;
394 switch (queue_type) {
407 memset(cmd, 0, sizeof(*cmd));
424 memset(cmd, 0, sizeof(*cmd));
428 cmd->req.rsvd_version = 0;
430 cmd->req.rsvd_version = 2;
436 ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
443 ocrdma_assign_eq_vect_gen2(dev, eq);
446 dev->nic_info.msix.start_vector += 1;
448 eq->q.created = true;
453 static int ocrdma_create_eq(struct ocrdma_dev *dev,
463 status = ocrdma_mbx_create_eq(dev, eq);
467 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
471 ocrdma_free_q(dev, &eq->q);
489 ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
491 ocrdma_free_eq_vect_gen2(dev);
492 ocrdma_free_q(dev, &eq->q);
503 ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
505 irq = ocrdma_get_irq(dev, eq);
507 _ocrdma_destroy_eq(dev, eq);
510 static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
515 for (i = 0; i < dev->eq_cnt; i++)
516 ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
519 static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
527 memset(cmd, 0, sizeof(*cmd));
546 static u32 ocrdma_encoded_q_len(int q_len)
548 u32 len_encoded = fls(q_len);
550 if (len_encoded == 16)
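/*
 * Sketch of the fls()-based length encoding used by ocrdma_encoded_q_len()
 * above: the queue size is reported as the 1-based position of its most
 * significant set bit, and the excerpt suggests an encoded value of 16 is
 * treated specially (assumed here to be folded to 0). The open-coded loop
 * below stands in for the kernel's fls() helper.
 */
#include <stdint.h>

static uint32_t encoded_q_len_sketch(int q_len)
{
	uint32_t len_encoded = 0;

	while (q_len) {		/* fls(): index of the MSB, 0 for an input of 0 */
		len_encoded++;
		q_len >>= 1;
	}
	if (len_encoded == 16)
		len_encoded = 0;	/* assumed special case from the excerpt */
	return len_encoded;
}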
555 static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
564 memset(cmd, 0, sizeof(*cmd));
572 cmd->v0.async_cqid_valid = (cq->id << 1);
573 cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
575 cmd->v0.cqid_ringsize |=
582 cmd->req.rsvd_version = 1;
586 cmd->v1.async_event_bitmap = Bit(20);
587 cmd->v1.async_cqid_ringsize = cq->id;
588 cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
603 static int ocrdma_create_mq(struct ocrdma_dev *dev)
613 status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
626 status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
633 ocrdma_free_q(dev, &dev->mq.sq);
635 ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
637 ocrdma_free_q(dev, &dev->mq.cq);
642 static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
651 ocrdma_free_q(dev, mbxq);
657 ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
658 ocrdma_free_q(dev, cq);
662 static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
673 static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
691 ib_evt.device = &dev->ibdev;
695 ib_evt.element.cq = &cq->ibcq;
701 ib_evt.element.cq = &cq->ibcq;
705 ib_evt.element.qp = &qp->ibqp;
707 ocrdma_process_qpcat_error(dev, qp);
710 ib_evt.element.qp = &qp->ibqp;
714 ib_evt.element.qp = &qp->ibqp;
718 ib_evt.element.qp = &qp->ibqp;
722 ib_evt.element.port_num = 1;
728 ib_evt.element.srq = &qp->srq->ibsrq;
734 ib_evt.element.srq = &qp->srq->ibsrq;
740 ib_evt.element.qp = &qp->ibqp;
748 ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
753 if (qp->ibqp.event_handler)
754 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
755 } else if (cq_event) {
756 if (cq->ibcq.event_handler)
757 cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
758 } else if (srq_event) {
759 if (qp->srq->ibsrq.event_handler)
760 qp->srq->ibsrq.event_handler(&ib_evt,
763 } else if (dev_event)
768 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
776 ocrdma_dispatch_ibevent(dev, cqe);
779 __func__, dev->id, evt_code);
793 ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
797 static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
803 cqe = ocrdma_get_mcqe(dev);
806 ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
809 ocrdma_process_acqe(dev, cqe);
811 ocrdma_process_mcqe(dev, cqe);
813 ocrdma_err("%s() cqe->compl is not set.\n", __func__);
815 ocrdma_mcq_inc_tail(dev);
821 static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
826 bool buddy_cq_found = false;
849 buddy_cq_found = true;
852 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
853 if (buddy_cq_found == false)
855 if (cq->ibcq.comp_handler) {
857 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
858 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
862 static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
872 ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
878 spin_unlock_irqrestore(&cq->cq_lock, flags);
882 if (cq->ibcq.comp_handler) {
884 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
885 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
887 ocrdma_qp_buddy_cq_handler(dev, cq);
890 static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
893 if (cq_id == dev->mq.cq.id)
894 ocrdma_mq_cq_handler(dev, cq_id);
896 ocrdma_qp_cq_handler(dev, cq_id);
908 ptr = ocrdma_get_eqe(eq);
910 ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
918 ocrdma_cq_handler(dev, cq_id);
920 ocrdma_eq_inc_tail(eq);
922 ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
925 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
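/*
 * Pattern sketch, not the driver's code, of the event-queue service loop
 * suggested by lines 908-925 above: pop valid EQEs, dispatch the referenced
 * CQ, advance the tail, then re-arm the EQ doorbell with the number of
 * entries consumed. The EQE bit layout, the struct names and the callback
 * shapes are assumptions made for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

struct eqe_sketch {
	uint32_t valid_cqid;	/* assumed: valid bit in bit 0, CQ id in bits 16..31 */
};

struct eq_sketch {
	struct eqe_sketch *ring;
	uint16_t size;		/* number of entries, assumed power of two */
	uint16_t tail;		/* next entry to consume */
};

static void eq_service_sketch(struct eq_sketch *eq,
			      void (*handle_cq)(uint16_t cq_id),
			      void (*ring_db)(bool rearm, uint16_t popped))
{
	uint16_t popped = 0;

	while (eq->ring[eq->tail].valid_cqid & 1) {
		uint16_t cq_id = (uint16_t)(eq->ring[eq->tail].valid_cqid >> 16);

		handle_cq(cq_id);			/* ocrdma_cq_handler() role */
		eq->ring[eq->tail].valid_cqid = 0;	/* consume the entry */
		eq->tail = (uint16_t)((eq->tail + 1) & (eq->size - 1));
		popped++;
	}
	ring_db(true, popped);	/* re-arm and credit the consumed entries */
}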
935 mqe = ocrdma_get_mqe(dev);
936 cmd->hdr.tag_lo = dev->mq.sq.head;
937 ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
940 ocrdma_mq_inc_head(dev);
941 ocrdma_ring_mq_db(dev);
944 static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
949 (dev->mqe_ctx.cmd_done != false),
965 ocrdma_post_mqe(dev, mqe);
966 status = ocrdma_wait_mqe_cmpl(dev);
969 cqe_status = dev->mqe_ctx.cqe_status;
970 ext_status = dev->mqe_ctx.ext_status;
971 rsp = ocrdma_get_mqe_rsp(dev);
972 ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
973 if (cqe_status || ext_status) {
975 ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
979 status = ocrdma_get_mbx_cqe_errno(cqe_status);
983 status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
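/*
 * Sketch of the synchronous mailbox pattern visible above (post the command,
 * wait for its completion, then map either the CQE status or the response
 * status word to an errno-style return). The context struct, the callback
 * shapes and the errno placeholders are assumptions; the real driver uses
 * ocrdma_post_mqe(), ocrdma_wait_mqe_cmpl() and the ocrdma_get_mbx_*errno()
 * translators.
 */
#include <errno.h>

struct mqe_ctx_sketch {
	int cqe_status;		/* status reported in the completion entry */
	int ext_status;		/* extended status from the completion entry */
	int rsp_status;		/* status word in the response payload */
};

static int mbx_cmd_sketch(struct mqe_ctx_sketch *ctx,
			  void (*post)(struct mqe_ctx_sketch *),
			  int (*wait)(struct mqe_ctx_sketch *))
{
	int status;

	post(ctx);		/* ocrdma_post_mqe() role */
	status = wait(ctx);	/* ocrdma_wait_mqe_cmpl() role */
	if (status)
		return status;
	if (ctx->cqe_status || ctx->ext_status)
		return -EIO;	/* placeholder for ocrdma_get_mbx_cqe_errno() */
	if (ctx->rsp_status)
		return -EINVAL;	/* placeholder for ocrdma_get_mbx_errno() */
	return 0;
}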
989 static void ocrdma_get_attr(struct ocrdma_dev *dev,
1052 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
1067 static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
1080 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1094 static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
1106 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1110 status = ocrdma_check_fw_config(dev, rsp);
1116 static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
1125 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1129 ocrdma_get_attr(dev, &dev->attr, rsp);
1146 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1173 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1185 mem_size = *num_entries * entry_size;
1191 if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1198 *num_entries = mem_size / entry_size;
1202 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1254 pbes[i].pa_lo = (u32) (pa & 0xffffffff);
1260 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1282 static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1295 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1309 int i, selected_eq = 0, cq_cnt = 0;
1318 for (i = 0; i < dev->eq_cnt; i++) {
1319 if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
1325 dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
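/*
 * Sketch of the "least-loaded EQ" selection suggested by lines 1309-1325
 * above: scan the EQ table, pick the entry currently serving the fewest CQs,
 * and bump its count before returning its id. The slot struct is hypothetical
 * and locking is omitted; the driver's qp_eq_tbl[] plays this role.
 */
#include <stdint.h>

struct eq_slot_sketch {
	uint16_t id;
	int cq_cnt;	/* number of CQs currently bound to this EQ */
};

static uint16_t bind_eq_sketch(struct eq_slot_sketch *tbl, int eq_cnt)
{
	int i, selected_eq = 0;
	int cq_cnt = tbl[0].cq_cnt;

	for (i = 1; i < eq_cnt; i++) {
		if (tbl[i].cq_cnt < cq_cnt) {
			cq_cnt = tbl[i].cq_cnt;
			selected_eq = i;
		}
	}
	tbl[selected_eq].cq_cnt += 1;
	return tbl[selected_eq].id;
}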
1330 static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1335 for (i = 0; i < dev->eq_cnt; i++) {
1347 int status = -ENOMEM; int max_hw_cqe;
1355 if (entries > dev->attr.max_cqe) {
1356 ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
1357 __func__, dev->id, dev->attr.max_cqe, entries);
1370 max_hw_cqe = dev->attr.max_cqe;
1388 page_size = cq->len / hw_pages;
1391 cmd->cmd.pgsz_pgcnt |= hw_pages;
1396 cq->eqn = ocrdma_bind_eq(dev);
1398 cqe_count = cq->len / cqe_size;
1399 if (cqe_count > 1024)
1404 switch (cqe_count) {
1426 cmd->cmd.cqe_count = (cq->len / cqe_size);
1428 cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
1433 ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1434 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1443 ocrdma_unbind_eq(dev, cq->eqn);
1466 ocrdma_unbind_eq(dev, cq->eqn);
1467 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1477 u32 pdid, int addr_check)
1500 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1520 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1564 for (i = 0; i < pbl_cnt; i++) {
1568 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1578 static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
1580 u32 pbl_offset, u32 last)
1594 for (i = 0; i < pbl_cnt; i++) {
1596 (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
1600 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1613 u32 cur_pbl_cnt, pbl_offset;
1618 if (cur_pbl_cnt == pending_pbl_cnt)
1621 status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
1622 cur_pbl_cnt, hwmr->pbe_size, last);
1624 ocrdma_err("%s() status=%d\n", __func__, status);
1632 pbl_offset += cur_pbl_cnt;
1633 pending_pbl_cnt -= cur_pbl_cnt;
1638 if (cur_pbl_cnt == pending_pbl_cnt)
1641 status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
1647 ocrdma_err("%s() err. status=%d\n", __func__, status);
1681 unsigned long flags;
1692 spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
1698 unsigned long flags;
1701 new_state = get_ocrdma_qp_state(new_ib_state);
1708 if (new_state == qp->state) {
1709 spin_unlock_irqrestore(&qp->q_lock, flags);
1713 switch (qp->state) {
1715 switch (new_state) {
1726 switch (new_state) {
1740 switch (new_state) {
1753 switch (new_state) {
1767 switch (new_state) {
1778 switch (new_state) {
1789 switch (new_state) {
1804 spin_unlock_irqrestore(&qp->q_lock, flags);
1808 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
1829 u32 len, hw_pages, hw_page_size;
1833 u32 max_wqe_allocated;
1834 u32 max_sges = attrs->cap.max_send_sge;
1836 max_wqe_allocated = attrs->cap.max_send_wr;
1839 max_wqe_allocated += 1;
1841 status = ocrdma_build_q_conf(&max_wqe_allocated,
1842 dev->attr.wqe_size, &hw_pages, &hw_page_size);
1844 ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,
1848 qp->sq.max_cnt = max_wqe_allocated;
1849 len = (hw_pages * hw_page_size);
1857 qp->sq.entry_size = dev->attr.wqe_size;
1858 ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
1885 u32 len, hw_pages, hw_page_size;
1889 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
1891 status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
1892 &hw_pages, &hw_page_size);
1894 ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,
1895 attrs->cap.max_recv_wr + 1);
1898 qp->rq.max_cnt = max_rqe_allocated;
1899 len = (hw_pages * hw_page_size);
1907 qp->rq.entry_size = dev->attr.rqe_size;
1909 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
1930 u8 enable_dpp_cq, u16 dpp_cq_id)
1949 int ird_page_size = dev->attr.ird_page_size;
1950 int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
1952 if (dev->attr.ird == 0)
1960 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
1968 u16 *dpp_offset, u16 *dpp_credit_lmt)
1970 u32 max_wqe_allocated, max_rqe_allocated;
1988 max_wqe_allocated = 1 << max_wqe_allocated;
1991 qp->sq.max_cnt = max_wqe_allocated;
1992 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
1995 qp->rq.max_cnt = max_rqe_allocated;
1996 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
2001 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
2002 u16 *dpp_credit_lmt)
2033 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2043 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2048 status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2055 flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2064 cq = get_ocrdma_cq(attrs->send_cq);
2068 cq = get_ocrdma_cq(attrs->recv_cq);
2074 (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
2075 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2078 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2082 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2109 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2124 memcpy(&in6, dgid, sizeof in6);
2125 if (rdma_is_multicast_addr(&in6))
2126 rdma_get_mcast_mac(&in6, mac_addr);
2127 else if (rdma_link_local_addr(&in6))
2128 rdma_get_ll_mac(&in6, mac_addr);
2130 ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
2136 static void ocrdma_set_av_params(struct ocrdma_qp *qp,
2146 cmd->params.tclass_sq_psn |=
2148 cmd->params.rnt_rc_sl_fl |=
2150 cmd->params.hop_lmt_rq_psn |=
2154 sizeof(cmd->params.dgid));
2156 ah_attr->grh.sgid_index, &sgid);
2160 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2161 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2163 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2164 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2165 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2166 vlan_id = rdma_get_vlan_id(&sgid);
2167 if (vlan_id && (vlan_id < 0x1000)) {
2168 cmd->params.vlan_dmac_b4_to_b5 |=
2174 static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2181 int eth_mtu = iboe_get_mtu(netdev->mtu);
2194 ocrdma_set_av_params(qp, cmd, attrs);
2197 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
2198 (qp->dev->nic_info.mac_addr[1] << 8) |
2199 (qp->dev->nic_info.mac_addr[2] << 16) |
2200 (qp->dev->nic_info.mac_addr[3] << 24);
2201 cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
2202 (qp->dev->nic_info.mac_addr[5] << 8);
2206 cmd->params.max_sge_recv_flags |=
2216 if (ib_mtu_enum_to_int(eth_mtu) <
2217 ib_mtu_enum_to_int(attrs->path_mtu)) {
2221 cmd->params.path_mtu_pkey_indx |=
2222 (ib_mtu_enum_to_int(attrs->path_mtu) <<
2251 cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2255 cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2295 cmd->params.max_sge_recv_flags |=
2296 (get_ocrdma_qp_state(attrs->qp_state) <<
2301 cmd->params.max_sge_recv_flags |=
2304 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
2307 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2326 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2334 if (!qp->srq && qp->rq.va)
2337 qp->pd->num_dpp_qp++;
2346 int hw_pages, hw_page_size;
2353 u32 max_rqe_allocated;
2360 max_rqe_allocated = srq_attr->attr.max_wr + 1;
2361 status = ocrdma_build_q_conf(&max_rqe_allocated,
2363 &hw_pages, &hw_page_size);
2365 ocrdma_err("%s() req. max_wr=0x%x\n", __func__,
2366 srq_attr->attr.max_wr);
2370 len = hw_pages * hw_page_size;
2376 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2378 srq->rq.entry_size = dev->attr.rqe_size;
2381 srq->rq.max_cnt = max_rqe_allocated;
2394 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2399 srq->rq.dbid = rsp->id;
2403 max_rqe_allocated = (1 << max_rqe_allocated);
2404 srq->rq.max_cnt = max_rqe_allocated;
2405 srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2427 status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
2439 cmd->id = srq->rq.dbid;
2440 status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
2465 status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
2468 srq->rq.va, srq->rq.pa);
2478 unsigned long flags;
2482 for (i = 0; i < dev->av_tbl.num_ah; i++) {
2483 if (av->valid == 0) {
2492 if (i == dev->av_tbl.num_ah)
2494 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2500 unsigned long flags;
2503 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2507 static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
2511 unsigned long flags = 0;
2517 num_eq = dev->nic_info.msix.num_vectors -
2530 irq = ocrdma_get_irq(dev, &dev->meq);
2531 status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
2534 _ocrdma_destroy_eq(dev, &dev->meq);
2538 static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
2540 int num_eq, i, status = 0;
2542 unsigned long flags = 0;
2544 num_eq = dev->nic_info.msix.num_vectors -
2555 for (i = 0; i < num_eq; i++) {
2556 status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
2564 irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
2565 status = request_irq(irq, ocrdma_irq_handler, flags,
2569 _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
2579 ocrdma_destroy_qp_eqs(dev);
2587 status = ocrdma_create_mq_eq(dev);
2591 status = ocrdma_create_qp_eqs(dev);
2594 status = ocrdma_create_mq(dev);
2597 status = ocrdma_mbx_query_fw_config(dev);
2600 status = ocrdma_mbx_query_dev(dev);
2603 status = ocrdma_mbx_query_fw_ver(dev);
2606 status = ocrdma_mbx_create_ah_tbl(dev);
2612 ocrdma_destroy_mq(dev);
2614 ocrdma_destroy_qp_eqs(dev);
2616 ocrdma_destroy_eq(dev, &dev->meq);
2617 ocrdma_err("%s() status=%d\n", __func__, status);
2623 ocrdma_mbx_delete_ah_tbl(dev);
2626 ocrdma_destroy_qp_eqs(dev);
2629 ocrdma_destroy_mq(dev);
2630 ocrdma_destroy_eq(dev, &dev->meq);