54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid));
68 memset(attr, 0, sizeof *attr);
97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
115 dev = get_ocrdma_dev(ibdev);
117 ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
122 if (netif_running(netdev) && netif_oper_up(netdev)) {
156 dev = get_ocrdma_dev(ibdev);
158 ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
175 INIT_LIST_HEAD(&mm->entry);
190 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
208 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
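/* Annotation (a reading of the excerpt, not driver text): the repeated
 * (len, phy_addr) comparisons suggest each ucontext keeps a list of
 * struct ocrdma_mm entries keyed by the physical address and length of
 * every region the driver hands out, so a later ocrdma_mmap() call can
 * verify that the user-supplied offset names a mapping the kernel
 * actually registered via ocrdma_add_mmap(). */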
244 ctx->ah_tbl.len = map_len;
260 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
271 return ERR_PTR(status);
280 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
308 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
310 (len <= dev->nic_info.db_page_size)) {
314 } else if (dev->nic_info.dpp_unmapped_len &&
315 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
316 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
318 (len <= dev->nic_info.dpp_unmapped_len)) {
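/* Annotation: ocrdma_mmap() appears to accept exactly two ranges: the
 * context's doorbell page (first branch) and, when the adapter exposes
 * one, the direct-packet-push (DPP) window (second branch); offsets
 * outside both are presumably rejected. */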
331 static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
337 u64 dpp_page_addr = 0;
344 db_page_addr = pd->dev->nic_info.unmapped_db +
345 (pd->id * pd->dev->nic_info.db_page_size);
346 db_page_size = pd->dev->nic_info.db_page_size;
348 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
353 dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
355 status = ocrdma_add_mmap(uctx, dpp_page_addr,
360 rsp.dpp_page_addr_lo = dpp_page_addr;
363 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
374 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
390 if (udata && context) {
399 return ERR_PTR(status);
403 if (udata && context) {
404 status = ocrdma_copy_pd_uresp(pd, context, udata);
412 return ERR_PTR(status);
417 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
424 __func__, dev->id, pd->id);
434 usr_db = dev->nic_info.unmapped_db +
436 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
444 int acc, u32 num_pbls,
449 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
453 ocrdma_err("%s(%d) leaving err, invalid access rights\n",
463 mr->hwmr.local_rd = 1;
469 mr->hwmr.num_pbls = num_pbls;
479 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
495 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
502 for (i = 0; i < mr->num_pbls; i++) {
514 static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
527 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
528 num_pbls = num_pbls / (pbl_size / sizeof(u64));
530 } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);
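/* Annotation: the do/while above seems to retry the PBL sizing with a
 * larger pbl_size until the table count drops below
 * attr.max_num_mr_pbl; each PBL holds pbl_size / sizeof(u64) PBEs, so a
 * 4K PBL covers 512 PBEs and 2048 PBEs would need num_pbls = 4. */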
532 mr->hwmr.num_pbes = num_pbes;
533 mr->hwmr.num_pbls = num_pbls;
534 mr->hwmr.pbl_size = pbl_size;
553 for (i = 0; i < mr->num_pbls; i++) {
556 ocrdma_free_mr_pbl_tbl(dev, mr);
574 int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
576 if (!mr->hwmr.num_pbes)
586 for (i = 0; i < chunk->nmap; i++) {
588 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
604 if (total_num_pbes == num_pbes)
611 (mr->hwmr.pbl_size / sizeof(u64))) {
630 pd = get_ocrdma_pd(ibpd);
638 return ERR_PTR(status);
641 if (IS_ERR(mr->umem)) {
646 status = ocrdma_get_pbl_info(mr, num_pbes);
650 mr->hwmr.pbe_size = mr->umem->page_size;
652 mr->hwmr.va = usr_addr;
657 mr->hwmr.local_rd = 1;
659 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
662 build_user_pbes(dev, mr, num_pbes);
669 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
675 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
678 return ERR_PTR(status);
683 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
689 if (mr->hwmr.fr_mr == 0)
690 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
708 uresp.page_size = cq->len;
711 uresp.page_addr[0] = cq->pa;
712 uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
713 uresp.db_page_size = cq->dev->nic_info.db_page_size;
715 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
718 __func__, cq->dev->id, cq->id);
721 uctx = get_ocrdma_ucontext(ib_ctx);
722 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
725 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
727 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
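/* Annotation: the CQ response copied to userspace carries the queue
 * page and the doorbell page; both are registered with
 * ocrdma_add_mmap() so the user library can mmap() them, and the
 * doorbell registration is rolled back if registering the queue page
 * fails. */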
740 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
745 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
756 INIT_LIST_HEAD(&cq->sq_head);
763 return ERR_PTR(status);
766 status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
779 return ERR_PTR(status);
786 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
788 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
799 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
834 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
840 ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
844 if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
845 ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
846 __func__, dev->id, attrs->cap.max_send_wr);
848 __func__, dev->id, dev->attr.max_wqe);
851 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
852 ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
853 __func__, dev->id, attrs->cap.max_recv_wr);
855 __func__, dev->id, dev->attr.max_rqe);
858 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
859 ocrdma_err("%s(%d) unsupported inline data size=0x%x"
860 " requested\n", __func__, dev->id,
861 attrs->cap.max_inline_data);
862 ocrdma_err("%s(%d) supported inline data size=0x%x\n",
863 __func__, dev->id, dev->attr.max_inline_data);
866 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
867 ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
868 __func__, dev->id, attrs->cap.max_send_sge);
869 ocrdma_err("%s(%d) supported send_sge=0x%x\n",
870 __func__, dev->id, dev->attr.max_send_sge);
873 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
874 ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
875 __func__, dev->id, attrs->cap.max_recv_sge);
876 ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
877 __func__, dev->id, dev->attr.max_recv_sge);
883 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
889 ocrdma_err("%s(%d) GSI special QPs already created.\n",
899 ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
907 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
908 struct ib_udata *udata, int dpp_offset,
909 int dpp_credit_lmt, int srq)
917 memset(&uresp, 0, sizeof(uresp));
918 usr_db = dev->nic_info.unmapped_db +
920 uresp.qp_id = qp->id;
921 uresp.sq_dbid = qp->sq.dbid;
922 uresp.num_sq_pages = 1;
923 uresp.sq_page_size = qp->sq.len;
924 uresp.sq_page_addr[0] = qp->sq.pa;
925 uresp.num_wqe_allocated = qp->sq.max_cnt;
927 uresp.rq_dbid = qp->rq.dbid;
928 uresp.num_rq_pages = 1;
929 uresp.rq_page_size = qp->rq.len;
930 uresp.rq_page_addr[0] = qp->rq.pa;
931 uresp.num_rqe_allocated = qp->rq.max_cnt;
933 uresp.db_page_addr = usr_db;
934 uresp.db_page_size = dev->nic_info.db_page_size;
937 uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
939 uresp.db_shift = (qp->id < 128) ? 24 : 16;
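/* Annotation: the ternaries above appear to encode two hardware
 * doorbell layouts: QPs with id below 128 use one RQ doorbell offset
 * and shift their count field by 24 bits, while higher ids use the
 * alternate offset and a 16-bit shift. */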
947 uresp.dpp_credit = dpp_credit_lmt;
948 uresp.dpp_offset = dpp_offset;
950 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
952 ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
955 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
961 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
968 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
994 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1009 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1021 qp->sq.max_sges = attrs->cap.max_send_sge;
1022 qp->rq.max_sges = attrs->cap.max_recv_sge;
1033 qp->ibqp.qp_num = qp->id;
1036 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1051 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1055 u16 dpp_credit_lmt, dpp_offset;
1057 status = ocrdma_check_qp_params(ibpd, dev, attrs);
1061 memset(&ureq, 0, sizeof(ureq));
1063 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1072 ocrdma_set_qp_init_params(qp, pd, attrs);
1077 &dpp_offset, &dpp_credit_lmt);
1082 if (udata == NULL) {
1085 status = ocrdma_alloc_wr_id_tbl(qp);
1090 status = ocrdma_add_qpn_map(dev, qp);
1093 ocrdma_set_qp_db(dev, qp, pd);
1095 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1101 ocrdma_store_gsi_qp_cq(dev, attrs);
1102 ocrdma_set_qp_use_cnt(qp, pd);
1107 ocrdma_del_qpn_map(dev, qp);
1115 ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
1117 return ERR_PTR(status);
1128 qp = get_ocrdma_qp(ibqp);
1142 int attr_mask, struct ib_udata *udata)
1144 unsigned long flags;
1150 qp = get_ocrdma_qp(ibqp);
1162 spin_unlock_irqrestore(&qp->q_lock, flags);
1165 ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
1166 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1167 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1180 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1198 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1200 int ib_qp_acc_flags = 0;
1206 return ib_qp_acc_flags;
1216 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1219 memset(&params, 0, sizeof(params));
1238 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1239 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1240 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1241 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1242 qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
1243 qp_init_attr->cap = qp_attr->cap;
1245 sizeof(params.dgid));
1257 qp_attr->ah_attr.port_num = 1;
1273 qp_attr->ah_attr.src_path_bits = 0;
1274 qp_attr->ah_attr.static_rate = 0;
1292 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1295 unsigned int mask = (1 << (idx % 32));
1313 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1315 return (qp->sq.tail == qp->sq.head &&
1316 ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
1319 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1321 return (qp->rq.tail == qp->rq.head) ? 1 : 0;
1348 unsigned long cq_flags;
1349 unsigned long flags;
1350 int discard_cnt = 0;
1351 u32 cur_getp, stop_getp;
1364 cur_getp = cq->getp;
1366 stop_getp = cur_getp;
1368 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1371 cqe = cq->va + cur_getp;
1379 if (qpn == 0 || qpn != qp->id)
1388 ocrdma_hwq_inc_tail(&qp->sq);
1392 ocrdma_hwq_inc_tail(&qp->srq->rq);
1393 ocrdma_srq_toggle_bit(qp->srq, cur_getp);
1394 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1397 ocrdma_hwq_inc_tail(&qp->rq);
1401 } while (cur_getp != stop_getp);
1402 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
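/* Annotation: ocrdma_discard_cqes() looks like it walks the CQ ring
 * exactly once (cur_getp wrapping back to stop_getp), skips CQEs that
 * belong to other QPs, and advances the SQ, RQ, or SRQ tail for each
 * discarded entry so the QP can be freed without userspace ever polling
 * its leftover completions. */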
1405 static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1408 unsigned long flags;
1421 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1432 unsigned long flags;
1434 qp = get_ocrdma_qp(ibqp);
1456 spin_lock(&qp->rq_cq->cq_lock);
1458 ocrdma_del_qpn_map(dev, qp);
1461 spin_unlock(&qp->rq_cq->cq_lock);
1462 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1465 ocrdma_discard_cqes(qp, qp->sq_cq);
1466 ocrdma_discard_cqes(qp, qp->rq_cq);
1471 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
1473 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
1476 ocrdma_del_flush_qp(qp);
1495 uresp.num_rq_pages = 1;
1496 uresp.rq_page_addr[0] = srq->rq.pa;
1497 uresp.rq_page_size = srq->rq.len;
1498 uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
1499 (srq->pd->id * srq->dev->nic_info.db_page_size);
1500 uresp.db_page_size = srq->dev->nic_info.db_page_size;
1501 uresp.num_rqe_allocated = srq->rq.max_cnt;
1504 uresp.db_shift = 24;
1507 uresp.db_shift = 16;
1510 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1513 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1514 uresp.rq_page_size);
1525 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1529 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1531 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1536 return ERR_PTR(status);
1546 if (udata == NULL) {
1553 (srq->rq.max_cnt % 32 ? 1 : 0);
1562 if (init_attr->attr.srq_limit) {
1570 status = ocrdma_copy_srq_uresp(srq, udata);
1584 return ERR_PTR(status);
1595 srq = get_ocrdma_srq(ibsrq);
1608 srq = get_ocrdma_srq(ibsrq);
1619 srq = get_ocrdma_srq(ibsrq);
1623 __func__, dev->id, srq->id);
1630 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
1640 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1652 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1662 for (i = 0; i < num_sge; i++) {
1670 memset(sge, 0, sizeof(*sge));
1673 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1681 " unspported len req=0x%x\n", __func__,
1686 (void *)(unsigned long)wr->sg_list[0].addr,
1708 u32 wqe_size = sizeof(*hdr);
1711 ocrdma_build_ud_hdr(qp, hdr, wr);
1717 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1727 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1729 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1732 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1734 ext_rw->lrkey = wr->wr.rdma.rkey;
1758 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
1769 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1771 unsigned long flags;
1775 spin_unlock_irqrestore(&qp->q_lock, flags);
1780 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1785 hdr = ocrdma_hwq_head(&qp->sq);
1802 ocrdma_build_send(qp, hdr, wr);
1808 status = ocrdma_build_send(qp, hdr, wr);
1815 status = ocrdma_build_write(qp, hdr, wr);
1820 ocrdma_build_read(qp, hdr, wr);
1826 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
1847 ocrdma_ring_sq_db(qp);
1850 ocrdma_hwq_inc_head(&qp->sq);
1853 spin_unlock_irqrestore(&qp->q_lock, flags);
1857 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
1870 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
1872 wqe_size = sizeof(*sge) + sizeof(*rqe);
1879 rqe->rsvd_tag = tag;
1882 ocrdma_cpu_to_le32(rqe, wqe_size);
1889 unsigned long flags;
1890 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1895 spin_unlock_irqrestore(&qp->q_lock, flags);
1900 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
1906 rqe = ocrdma_hwq_head(&qp->rq);
1907 ocrdma_build_rqe(rqe, wr, 0);
1914 ocrdma_ring_rq_db(qp);
1917 ocrdma_hwq_inc_head(&qp->rq);
1920 spin_unlock_irqrestore(&qp->q_lock, flags);
1929 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
1937 indx = (row * 32) + (indx - 1);
1938 if (indx >= srq->rq.max_cnt)
1940 ocrdma_srq_toggle_bit(srq, indx);
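/* Annotation: SRQ slots appear to be tracked in 32-bit bitmap rows; the
 * (row * 32) + (indx - 1) arithmetic converts a 1-based first-set-bit
 * position into a queue index, and ocrdma_srq_toggle_bit() marks it
 * in-use until the matching completion frees it again. */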
1950 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
1952 u32 val = srq->rq.dbid | (1 << 16);
1961 unsigned long flags;
1966 srq = get_ocrdma_srq(ibsrq);
1970 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
1976 tag = ocrdma_srq_get_idx(srq);
1977 rqe = ocrdma_hwq_head(&srq->rq);
1978 ocrdma_build_rqe(rqe, wr, tag);
1984 ocrdma_ring_srq_db(srq);
1986 ocrdma_hwq_inc_head(&srq->
rq);
1989 spin_unlock_irqrestore(&srq->q_lock, flags);
2068 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2075 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2097 ocrdma_err("%s() invalid opcode received = 0x%x\n",
2103 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2135 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2138 bool expand = false;
2142 ibwc->status = ocrdma_to_ibwc_err(status);
2150 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2152 ocrdma_set_cqe_status_flushed(qp, cqe);
2157 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2162 ocrdma_hwq_inc_tail(&qp->rq);
2164 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2167 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2170 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2171 ocrdma_hwq_inc_tail(&qp->sq);
2173 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2177 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2179 bool *polled, bool *stop)
2188 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2195 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2208 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2213 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2215 struct ib_wc *ibwc, bool *polled)
2217 bool expand = false;
2227 ocrdma_update_wc(qp, ibwc, tail);
2231 if (tail != wqe_idx)
2234 ocrdma_hwq_inc_tail(&qp->sq);
2239 struct ib_wc *ibwc, bool *polled, bool *stop)
2248 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2250 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2254 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2270 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2274 unsigned long flags;
2278 srq = get_ocrdma_srq(qp->ibqp.srq);
2282 ocrdma_srq_toggle_bit(srq, wqe_idx);
2283 spin_unlock_irqrestore(&srq->q_lock, flags);
2284 ocrdma_hwq_inc_tail(&srq->rq);
2288 struct ib_wc *ibwc, bool *polled, bool *stop,
2296 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2300 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2308 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2313 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2321 ocrdma_update_ud_rcqe(ibwc, cqe);
2337 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2340 ocrdma_hwq_inc_tail(&qp->rq);
2345 struct ib_wc *ibwc, bool *polled, bool *stop)
2348 bool expand = false;
2361 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2363 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2385 bool expand = false;
2386 int polled_hw_cqes = 0;
2390 u16 cur_getp; bool polled = false; bool stop = false;
2392 cur_getp = cq->getp;
2393 while (num_entries) {
2394 cqe = cq->va + cur_getp;
2406 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2409 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2419 polled_hw_cqes += 1;
2421 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2431 cq->getp = cur_getp;
2432 if (polled_hw_cqes || expand || stop) {
2440 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2445 while (num_entries) {
2446 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2448 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2449 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2450 ocrdma_hwq_inc_tail(&qp->sq);
2451 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2453 ocrdma_hwq_inc_tail(&qp->rq);
2469 unsigned long flags;
2471 int num_os_cqe = 0, err_cqes = 0;
2474 cq = get_ocrdma_cq(ibcq);
2479 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2480 spin_unlock_irqrestore(&cq->cq_lock, flags);
2481 cqes_to_poll -= num_os_cqe;
2484 wc = wc + num_os_cqe;
2492 if (cqes_to_poll == 0)
2494 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2495 cqes_to_poll -= err_cqes;
2496 num_os_cqe += err_cqes;
2499 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
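/* Annotation: ocrdma_poll_cq() seems to drain real hardware CQEs first
 * and then, if still short of the requested count, walk the device
 * flush list under flush_q_lock, letting ocrdma_add_err_cqe() synthesize
 * flushed completions for error-state QPs whose queues are not empty. */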
2507 unsigned long flags;
2513 cq = get_ocrdma_cq(ibcq);
2520 if (cq_flags & IB_CQ_SOLICITED)
2523 cur_getp = cq->getp;
2524 cqe = cq->va + cur_getp;
2534 spin_unlock_irqrestore(&cq->cq_lock, flags);