/*
 * Excerpts from the Chelsio iw_cxgb4 completion-queue code (cq.c).
 * This first fragment is presumably from destroy_cq(): the work
 * request length covers the FW_RI_RES_WR header plus one resource
 * entry, and the caller blocks on wr_wait until the firmware
 * acknowledges the request.
 */
wr_len = sizeof *res_wr + sizeof *res;
/* ... */
c4iw_init_wr_wait(&wr_wait);
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
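/*
 * The same WR sizing and wr_wait handshake, presumably from
 * create_cq(): the response WR is zeroed before its fields are
 * filled in, and the caller again blocks on the firmware reply.
 */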
wr_len = sizeof *res_wr + sizeof *res;
/* ... */
memset(res_wr, 0, wr_len);
/* ... */
c4iw_init_wr_wait(&wr_wait);
/* ... */
PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
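/*
 * insert_recv_cqe() synthesizes a software CQE for a receive WR that
 * will never complete in hardware; the loop fragment after it (likely
 * c4iw_flush_rq()) posts one such CQE per RQ entry still in use.
 */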
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        /* ... */
}

/* Flush loop, likely from c4iw_flush_rq(): */
PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
     wq, cq, wq->rq.in_use, count);
/* ... */
insert_recv_cqe(wq, cq);
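/*
 * The SQ analogue: insert_sq_cqe() posts a flush CQE per in-flight
 * send WR, and the loop fragment after it (likely c4iw_flush_sq())
 * walks sw_sq with wraparound.
 */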
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        /* ... */
}

/* Per-WR flush loop with sw_sq wraparound (likely c4iw_flush_sq()): */
insert_sq_cqe(wq, cq, swsqe);
if (swsqe == (wq->sq.sw_sq + wq->sq.size))
        swsqe = wq->sq.sw_sq;
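/*
 * Draining the hardware CQ into the software CQ (this loop appears to
 * be c4iw_flush_hw_cq()): each valid HW CQE is moved to sw_queue until
 * t4_next_hw_cqe() runs dry.
 */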
PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
ret = t4_next_hw_cqe(cq, &cqe);
while (!ret) {
        PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
             __func__, cq->cidx, cq->sw_pidx);
        /* ... */
        ret = t4_next_hw_cqe(cq, &cqe);
}
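/*
 * Counting SQ completions pending in the software CQ (presumably
 * c4iw_count_scqes()): walk sw_queue from sw_cidx to sw_pidx and count
 * CQEs that belong to this WQ's SQ.
 */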
if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
                      wq->sq.oldest_read)) &&
    (CQE_QPID(cqe) == wq->sq.qid))
        (*count)++;
if (++ptr == cq->size)
        ptr = 0;
/* ... */
PDBG("%s cq %p count %d\n", __func__, cq, *count);
PDBG("%s count zero %d\n", __func__, *count);
/* ... */
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
        (*count)++;
if (++ptr == cq->size)
        ptr = 0;
/* ... */
PDBG("%s cq %p count %d\n", __func__, cq, *count);
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        /* ... */
        swsqe = &wq->sq.sw_sq[ptr];
        /* ... */
        if (++ptr == wq->sq.size)
                ptr = 0;
        swsqe = &wq->sq.sw_sq[ptr];
        /* ... */
        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
             __func__, ptr, cq->sw_pidx);
        /* ... */
        wq->sq.in_use -= unsignaled;
        /* ... */
}
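/*
 * A read response CQE does not identify the originating read request,
 * so create_read_req_cqe() rebuilds a CQE locally from the oldest
 * outstanding read WR.
 */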
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        /* ... */
}
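/*
 * advance_oldest_read() scans forward in the software SQ for the next
 * outstanding read request, clearing oldest_read to NULL when none
 * remains.
 */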
static void advance_oldest_read(struct t4_wq *wq)
{
        /* ... */
        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
                /* ... */
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}
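/*
 * The core poller, poll_cq(): fetch the next CQE, special-case read
 * responses, fold error statuses into a flush, and return the wr_id
 * cookie of the completed SQ or RQ slot.
 */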
struct t4_cqe *hw_cqe, read_cqe;
/* ... */
ret = t4_next_cqe(cq, &hw_cqe);
/* ... */
PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
     CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe), CQE_OPCODE(hw_cqe),
     CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe), CQE_WRID_LOW(hw_cqe));

/* Read-response special case: no outstanding read WR means this CQE
 * is unexpected. */
if (!wq->sq.oldest_read) {
        /* ... */
        t4_set_wq_in_error(wq);
        /* ... */
}
/* ... */
create_read_req_cqe(wq, hw_cqe, &read_cqe);
/* ... */
advance_oldest_read(wq);

/* Any error status (or a WQ already in error) turns into a flush. */
if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
        *cqe_flushed = t4_wq_in_error(wq);
        t4_set_wq_in_error(wq);
        /* ... */
}

/* An RQ completion against an empty RQ is fatal: */
if (t4_rq_empty(wq)) {
        t4_set_wq_in_error(wq);
        /* ... */
}
/* ... */
t4_set_wq_in_error(wq);
/* ... */
PDBG("%s out of order completion going in sw_sq at idx %u\n",
     __func__, CQE_WRID_SQ_IDX(hw_cqe));
swsqe->cqe = *hw_cqe;
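/*
 * Completion proper: the cookie handed back to the caller is the
 * wr_id saved in the software SQ/RQ slot; afterwards, newly in-order
 * completions are flushed and the polled CQE is consumed.
 */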
PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
/* ... */
PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
/* ... */
flush_completed_wrs(wq, cq);
/* ... */
PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
     __func__, cq, cq->cqid, cq->sw_cidx);
/* ... */
PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
     __func__, cq, cq->cqid, cq->cidx);
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct t4_cqe cqe = {0, 0}, *rd_cqe;
        /* ... */
        ret = t4_next_cqe(&chp->cq, &rd_cqe);
        /* ... */
        spin_lock(&qhp->lock);
        /* ... */
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        /* ... */
        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe),
             CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
             (unsigned long long)cookie);

        /* ... error reporting for unknown opcodes and statuses: */
        printk(KERN_ERR MOD "Unexpected opcode %d "
               "in the CQE received for QPID=0x%0x\n",
               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
        /* ... */
        printk(KERN_ERR MOD
               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
               CQE_STATUS(&cqe), CQE_QPID(&cqe));
        /* ... */
        spin_unlock(&qhp->lock);
        /* ... */
}
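/*
 * The ib_poll_cq entry point: pull up to num_entries completions
 * under the CQ lock, treating -ENODATA (empty CQ) as a normal stop
 * rather than an error.
 */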
chp = to_c4iw_cq(ibcq);
/* ... */
for (npolled = 0; npolled < num_entries; ++npolled) {
        /* ... */
        err = c4iw_poll_cq_one(chp, wc + npolled);
        /* ... */
}
spin_unlock_irqrestore(&chp->lock, flags);
return !err || err == -ENODATA ? npolled : err;
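/*
 * Teardown, c4iw_destroy_cq(): remove the CQ from the device idr,
 * then destroy it against the user context's uctx if it was a user
 * CQ, or the rdev's otherwise.
 */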
PDBG("%s ib_cq %p\n", __func__, ib_cq);
chp = to_c4iw_cq(ib_cq);
/* ... */
remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
/* ... */
ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                          : NULL;
destroy_cq(&chp->rhp->rdev, &chp->cq,
           ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
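/*
 * c4iw_create_cq(): size the hardware queue (a multiple of 16,
 * doubled to avoid cidx overflow, page-rounded for user CQs), create
 * it, and for user CQs export the queue and GTS page via two mmap
 * entries described in the udata response.
 */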
PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
/* ... */
rhp = to_c4iw_dev(ibdev);
/* ... */
ucontext = to_c4iw_ucontext(ib_context);

/* Entries must be a multiple of 16 for the hardware. */
entries = roundup(entries, 16);

/* Double the HW queue to avoid cidx overflows. */
hwentries = entries * 2;
/* ... */
memsize = hwentries * sizeof *chp->cq.queue;

/* For a user CQ, memsize is page-rounded and hwentries recomputed. */
hwentries = memsize / sizeof *chp->cq.queue;
/* ... */
hwentries = memsize / sizeof *chp->cq.queue;
/* ... */
chp->cq.size = hwentries;
/* ... */
ret = create_cq(&rhp->rdev, &chp->cq,
                ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
/* ... */
chp->ibcq.cqe = entries - 2;
/* ... */
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

/* Fill the udata response so userspace can mmap the queue and GTS page. */
uresp.cqid = chp->cq.cqid;
uresp.size = chp->cq.size;
/* ... */
uresp.key = ucontext->key;
/* ... */
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
/* ... */
mm->len = chp->cq.memsize;
insert_mmap(ucontext, mm);
/* ... */
insert_mmap(ucontext, mm2);

PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
     __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
     (unsigned long long)chp->cq.dma_addr);
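/*
 * Error unwind, presumably the err labels of c4iw_create_cq(): drop
 * the idr entry and destroy the half-constructed CQ.
 */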
remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
/* ... */
destroy_cq(&chp->rhp->rdev, &chp->cq,
           ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
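/*
 * c4iw_arm_cq() arms the CQ for notification under the CQ lock,
 * requesting solicited-only events when IB_CQ_SOLICITED is set.
 */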
chp = to_c4iw_cq(ibcq);
/* ... */
ret = t4_arm_cq(&chp->cq,
                (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
spin_unlock_irqrestore(&chp->lock, flag);