#include <asm/delay.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/slab.h>

        if (!strcmp(rdev->dev_name, dev_name))

        ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);

        if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {

                while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
                        rptr++;

                cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
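                /*
                 * For reference: Q_PTR2IDX() (from the companion cxio_wr.h
                 * header) masks a free-running ring pointer down to an index
                 * in the power-of-two queue, roughly
                 *
                 *      Q_PTR2IDX(ptr, size_log2) == ((ptr) & ((1UL << (size_log2)) - 1))
                 *
                 * so the loop above advances rptr until it sits one slot
                 * behind the index returned by the CQ rearm operation.
                 */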

static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)

        setup.credit_thres = 0;

static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)

                PDBG("%s alloc_skb failed\n", __func__);

        memset(wqe, 0, sizeof(*wqe));

        sge_cmd = qpid << 8 | 3;
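        /*
         * The SGE command word apparently packs the QP id into the upper
         * bits, with a small command/context-type code (3 here) in the low
         * byte, before the modify-QP work request is posted.
         */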

        if (!list_empty(&uctx->qpids)) {

                for (i = qpid+1; i & rdev_p->qpmask; i++) {

        PDBG("%s qpid 0x%x\n", __func__, qpid);

static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
                     struct cxio_ucontext *uctx)

        PDBG("%s qpid 0x%x\n", __func__, qpid);

                list_del_init(&entry->entry);

        INIT_LIST_HEAD(&uctx->qpids);

        wq->qpid = get_qpid(rdev_p, uctx);

                                       depth * sizeof(union t3_wr),

        PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,

        put_qpid(rdev_p, wq->qpid, uctx);
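        /*
         * In cxio_create_qp() the work queue itself is a coherent DMA buffer
         * of depth entries of union t3_wr; if a later allocation step fails,
         * the error path above hands the qpid back to the per-ucontext cache
         * via put_qpid().
         */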

        err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);

        put_qpid(rdev_p, wq->qpid, uctx);

static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)

        PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,

        PDBG("%s wq %p cq %p\n", __func__, wq, cq);

        PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,

                insert_recv_cqe(wq, cq);
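        /*
         * When the RQ is flushed, insert_recv_cqe() runs once for every
         * receive WR still outstanding beyond the skip count, synthesizing a
         * flush completion in the software CQ for work the hardware will
         * never complete.
         */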

static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
                          struct t3_swsq *sqp)

        PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,

                insert_sq_cqe(wq, cq, sqp);

        PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
        cqe = cxio_next_hw_cqe(cq);

                PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",

                cqe = cxio_next_hw_cqe(cq);
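        /*
         * cxio_flush_hw_cq() appears to drain every remaining hardware CQE
         * into the software CQ, re-reading cxio_next_hw_cqe() on each pass,
         * so those entries can still be polled from the software queue.
         */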

        PDBG("%s cq %p count %d\n", __func__, cq, *count);

        PDBG("%s count zero %d\n", __func__, *count);

                    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))

        PDBG("%s cq %p count %d\n", __func__, cq, *count);

static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)

        setup.credit_thres = 0;

static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)

        u64 sge_cmd, ctx0, ctx1;

                PDBG("%s alloc_skb failed\n", __func__);

        err = cxio_hal_init_ctrl_cq(rdev_p);

                PDBG("%s err %d initializing ctrl_cq\n", __func__, err);

                PDBG("%s dma_alloc_coherent failed\n", __func__);

        base_addr = rdev_p->ctrl_qp.dma_addr;

        ctx1 = (u32) base_addr;
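        /*
         * The control QP's coherent DMA base address, along with its size,
         * credit count and valid bit, is packed into the two 64-bit context
         * words ctx0/ctx1 that describe the queue to the hardware.
         */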

        memset(wqe, 0, sizeof(*wqe));

        PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
             (unsigned long long) rdev_p->ctrl_qp.dma_addr,

static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)

        u32 i, nr_wqe, copy_len;

        nr_wqe = len % 96 ? len / 96 + 1 : len / 96;
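        /*
         * Each control-QP work request carries at most 96 bytes of inline
         * payload, so the write is split into ceil(len / 96) WRs; the
         * expression above is DIV_ROUND_UP(len, 96) spelled out.
         */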
        PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",

        for (i = 0; i < nr_wqe; i++) {

                        PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
                             "wait for more space i %d\n", __func__,

                                PDBG("%s ctrl_qp workq interrupted\n",

                        PDBG("%s ctrl_qp wakeup, continue posting work request "
                             "i %d\n", __func__, i);

                if (i == (nr_wqe - 1)) {

                        utx_len = len / 32 + 1;

                        PDBG("%s force completion at i %d\n", __func__, i);

                utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);

                copy_data = (u8 *) data + i * 96;
                copy_len = len > 96 ? 96 : len;

                memcpy(wqe, copy_data, copy_len);

                               32 - (copy_len % 32));

        ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
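        /*
         * Once the work requests for this write are in the control QP's work
         * queue, the doorbell is rung with T3_CTRL_QP_ID so the hardware
         * starts processing them.
         */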

static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
                         u32 pbl_size, u32 pbl_addr)

        if (cxio_fatal_error(rdev_p))

        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

        *stag = (stag_idx << 8) | ((*stag) & 0xFF);

        PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
             __func__, stag_state, type, pdid, stag_idx);

        memset(&tpt, 0, sizeof(tpt));

        tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :

        tpt.rsvd_bind_cnt_or_pstag = 0;
        tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :

        err = cxio_hal_ctrl_qp_write_mem(rdev_p,

                     u32 pbl_addr, u32 pbl_size)

        PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
             __func__, pbl_addr, rdev_p->rnic_info.pbl_base,

        err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
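        /*
         * The control-QP write interface appears to address adapter memory
         * in 32-byte units, so the byte address is shifted right by 5, while
         * pbl_size (a count of 64-bit PBL entries) is shifted left by 3 to
         * give a byte length.
         */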

                     u8 page_size, u32 pbl_size, u32 pbl_addr)

                              zbva, to, len, page_size, pbl_size, pbl_addr);

                     u8 page_size, u32 pbl_size, u32 pbl_addr)

                              zbva, to, len, page_size, pbl_size, pbl_addr);

        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,

        return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,

        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,

                             0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
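        /*
         * The public TPT helpers (register/reregister, window allocation and
         * deallocation, dereg, stag allocation) all funnel into
         * __cxio_tpt_op(); the reset_tpt_entry argument selects whether the
         * TPT entry is written (0) or cleared (1).
         */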

        PDBG("%s rdev_p %p\n", __func__, rdev_p);

static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)

        PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
             " se %0x notify %0x cqbranch %0x creditth %0x\n",

        PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
             "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",

                PDBG("%s called by t3cdev %p with null ulp\n", __func__,

                (*cxio_ev_cb) (rdev_p, skb);

        if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {

        if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {

                PDBG("%s t3cdev_p or dev_name must be set\n", __func__);

        PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);

        rdev_p->t3cdev_p->ulp = (void *) rdev_p;

                       "need version %u but adapter has version %u\n",

        PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
             "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
             rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),

        PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
             "qpnr %d qpmask 0x%x\n",

        err = cxio_hal_init_ctrl_qp(rdev_p);

        cxio_hal_destroy_ctrl_qp(rdev_p);

        cxio_hal_destroy_ctrl_qp(rdev_p);

static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)

                        PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",

static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
                                struct t3_cqe *read_cqe)

static void advance_oldest_read(struct t3_wq *wq)

        struct t3_cqe *hw_cqe, read_cqe;

        hw_cqe = cxio_next_cqe(cq);

        PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",

                create_read_req_cqe(wq, hw_cqe, &read_cqe);

                advance_oldest_read(wq);
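        /*
         * For an incoming read response, create_read_req_cqe() appears to
         * synthesize a completion for the read request tracked in
         * wq->oldest_read, and advance_oldest_read() then moves that pointer
         * to the next read WR on the send queue.
         */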

        *cqe_flushed = wq->error;

                PDBG("%s out of order completion going in swsq at idx %ld\n",

                PDBG("%s completing sq idx %ld\n", __func__,

                PDBG("%s completing rq idx %ld\n", __func__,

        flush_completed_wrs(wq, cq);

                PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",

                PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",

                    || ((cq->rptr - cq->wptr) >= 128)) {
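                /*
                 * cq->rptr - cq->wptr is the number of CQEs consumed since
                 * credits were last returned; once the backlog reaches the
                 * threshold (128 entries here), the accumulated count is
                 * presumably handed back so the hardware can reclaim those
                 * CQ slots.
                 */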