#include <linux/sched.h>
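/*
 * The fragments below appear to be excerpts from the Chelsio cxgb3 iWARP
 * driver's QP code (iwch_qp.c); gaps between the excerpts are omitted.
 */

/*
 * Apparently from build_rdma_send(): the unused stag/reserved fields of the
 * send WQE are cleared, then the scatter/gather list is walked; the
 * (plen + length) < plen test catches unsigned overflow of the accumulated
 * payload length.
 */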
wqe->send.rem_stag = 0;
wqe->send.reserved[0] = 0;
wqe->send.reserved[1] = 0;
wqe->send.reserved[2] = 0;
for (i = 0; i < wr->num_sge; i++) {
        if ((plen + wr->sg_list[i].length) < plen)
*flit_cnt = 4 + ((wr->num_sge) << 1);
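/*
 * Apparently from build_rdma_write(): the same pattern as the send path,
 * with one extra header flit (5 rather than 4) plus two flits per SGE.
 */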
wqe->write.reserved[0] = 0;
wqe->write.reserved[1] = 0;
wqe->write.reserved[2] = 0;
for (i = 0; i < wr->num_sge; i++) {
        if ((plen + wr->sg_list[i].length) < plen) {
*flit_cnt = 5 + ((wr->num_sge) << 1);
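/*
 * Apparently from build_rdma_read(): local_inv is presumably set or cleared
 * depending on whether the work request asks for the local stag to be
 * invalidated when the read completes.
 */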
wqe->read.local_inv = 1;
wqe->read.local_inv = 0;
wqe->read.reserved[0] = 0;
wqe->read.reserved[1] = 0;
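/*
 * Apparently the tail of build_fastreg()'s parameter list and its PBL copy
 * loop: the fast-register WR's page list is copied into the WQE, and the
 * flit count is 5 plus one flit per page-list entry.
 */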
                         u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
p = &wqe->fastreg.pbl_addrs[0];
for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
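/*
 * Apparently from iwch_sgl2pbl_map(): each SGE's lkey (shifted right by 8)
 * is resolved to its memory region, the SGE is validated against the
 * region's state, zero-based-VA setting and bounds, and the byte offset is
 * converted into a PBL index and page size for the hardware.
 */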
for (i = 0; i < num_sgle; i++) {
        mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
        PDBG("%s %d\n", __func__, __LINE__);
        if (!mhp->attr.state) {
                PDBG("%s %d\n", __func__, __LINE__);
        if (mhp->attr.zbva) {
                PDBG("%s %d\n", __func__, __LINE__);
        if (sg_list[i].addr < mhp->attr.va_fbo) {
                PDBG("%s %d\n", __func__, __LINE__);
        PDBG("%s %d\n", __func__, __LINE__);
        if (sg_list[i].addr + ((u64) sg_list[i].length) >
                PDBG("%s %d\n", __func__, __LINE__);
        offset = sg_list[i].addr - mhp->attr.va_fbo;
        offset += mhp->attr.va_fbo &
                  ((1UL << (12 + mhp->attr.page_size)) - 1);
        pbl_addr[i] = ((mhp->attr.pbl_addr -
                        rhp->rdev.rnic_info.pbl_base) >> 3) +
                      (offset >> (12 + mhp->attr.page_size));
        page_size[i] = mhp->attr.page_size;
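/*
 * build_rdma_recv(): the page sizes and PBL addresses produced by
 * iwch_sgl2pbl_map() are copied into the receive WQE, unused SGL slots are
 * zeroed, and the wr_id is stashed in the software RQ entry so it can be
 * returned at completion time.
 */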
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
wqe->recv.pagesz[0] = page_size[0];
wqe->recv.pagesz[1] = page_size[1];
wqe->recv.pagesz[2] = page_size[2];
wqe->recv.pagesz[3] = page_size[3];
for (i = 0; i < wr->num_sge; i++) {
                     ((1UL << (12 + page_size[i])) - 1));
wqe->recv.sgl[i].stag = 0;
wqe->recv.sgl[i].len = 0;
wqe->recv.sgl[i].to = 0;
wqe->recv.pbl_addr[i] = 0;
qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
qhp->wq.rq_size_log2)].pbl_addr = 0;
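/*
 * build_zero_stag_recv(): apparently the variant for SGEs posted with a
 * zero (reserved) stag; the page addresses go into a pre-allocated PBL
 * region (hence the pbl_offset relative to the adapter's pbl_base), and
 * that PBL address is recorded in the software RQ entry alongside the
 * wr_id.
 */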
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
for (i = 0; i < wr->num_sge; i++) {
        wqe->recv.sgl[i].stag = 0;
wqe->recv.pagesz[i] = 0;
wqe->recv.sgl[i].stag = 0;
wqe->recv.sgl[i].len = 0;
wqe->recv.sgl[i].to = 0;
wqe->recv.pbl_addr[i] = 0;
qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
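/*
 * The post-send path (apparently iwch_post_send()): under the QP lock, each
 * work request is dispatched to the matching build_* helper, the firmware
 * header is written with build_fw_riwrh(), and the doorbell is rung if
 * enabled.
 */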
qhp = to_iwch_qp(ibqp);
spin_unlock_irqrestore(&qhp->lock, flag);
qhp->wq.sq_size_log2);
spin_unlock_irqrestore(&qhp->lock, flag);
wqe = (union t3_wr *) (qhp->wq.queue + idx);
err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
if (!qhp->wq.oldest_read)
        qhp->wq.oldest_read = sqp;
err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
PDBG("%s post of type=%d TBD!\n", __func__,
wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
sqp->opcode = wr2opcode(t3_wr_opcode);
build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
     __func__, (unsigned long long) wr->wr_id, idx,
qhp->wq.wptr += wr_cnt;
spin_unlock_irqrestore(&qhp->lock, flag);
if (cxio_wq_db_enabled(&qhp->wq))
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
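/*
 * The post-receive path (apparently iwch_post_receive()): the same pattern,
 * choosing build_rdma_recv() or build_zero_stag_recv() per work request.
 */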
qhp = to_iwch_qp(ibqp);
spin_unlock_irqrestore(&qhp->lock, flag);
qhp->wq.rq_size_log2) - 1;
spin_unlock_irqrestore(&qhp->lock, flag);
wqe = (union t3_wr *) (qhp->wq.queue + idx);
err = build_rdma_recv(qhp, wqe, wr);
err = build_zero_stag_recv(qhp, wqe, wr);
PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
spin_unlock_irqrestore(&qhp->lock, flag);
if (cxio_wq_db_enabled(&qhp->wq))
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
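/*
 * The memory-window bind path (apparently iwch_bind_mw()): an SGE covering
 * the bound range is mapped to a PBL via iwch_sgl2pbl_map(), a T3_WR_BIND
 * WQE is built, and the doorbell is rung if enabled.
 */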
qhp = to_iwch_qp(qp);
mhp = to_iwch_mw(mw);
spin_unlock_irqrestore(&qhp->lock, flag);
qhp->wq.sq_size_log2);
spin_unlock_irqrestore(&qhp->lock, flag);
PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
wqe = (union t3_wr *) (qhp->wq.queue + idx);
sgl.lkey = mw_bind->mr->lkey;
wqe->bind.reserved = 0;
err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
spin_unlock_irqrestore(&qhp->lock, flag);
wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
spin_unlock_irqrestore(&qhp->lock, flag);
if (cxio_wq_db_enabled(&qhp->wq))
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
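/*
 * build_term_codes() apparently maps the error status carried in a
 * response-queue message to the layer type and error code placed in a
 * TERMINATE message.
 */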
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
                                    u8 *layer_type, u8 *ecode)
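/*
 * The lines below clear the reserved fields of a read WQE; they likely
 * belong to the driver's zero-byte-read / terminate helpers.
 */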
PDBG("%s enter\n", __func__);
wqe->read.reserved[0] = 0;
wqe->read.reserved[1] = 0;
PDBG("%s %d\n", __func__, __LINE__);
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
spin_unlock(&qhp->lock);
spin_lock(&rchp->lock);
spin_lock(&qhp->lock);
spin_unlock(&qhp->lock);
spin_unlock(&rchp->lock);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
spin_lock(&schp->lock);
spin_lock(&qhp->lock);
spin_unlock(&qhp->lock);
spin_unlock(&schp->lock);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
spin_lock(&qhp->lock);
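/*
 * flush_qp(): for user-mode QPs the WQ and both CQs are simply marked in
 * error and the completion handlers notified; otherwise the flush goes
 * through __flush_qp() above.
 */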
static void flush_qp(struct iwch_qp *qhp)
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
schp = get_chp(qhp->rhp, qhp->attr.scq);
if (qhp->ibqp.uobject) {
        cxio_set_wq_in_error(&qhp->wq);
        cxio_set_cq_in_error(&rchp->cq);
        (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
        cxio_set_cq_in_error(&schp->cq);
        (*schp->ibcq.comp_handler)(&schp->ibcq,
                                   schp->ibcq.cq_context);
__flush_qp(qhp, rchp, schp);
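/*
 * Apparently iwch_rqes_posted(): the WQ memory is scanned to count the
 * receive WRs currently posted.
 */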
union t3_wr *wqe = qhp->wq.queue;
PDBG("%s qhp %p count %u\n", __func__, qhp, count);
init_attr.tid = qhp->ep->hwtid;
init_attr.qpid = qhp->wq.qpid;
init_attr.pdid = qhp->attr.pd;
init_attr.scqid = qhp->attr.scq;
init_attr.rcqid = qhp->attr.rcq;
init_attr.rq_addr = qhp->wq.rq_addr;
init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
        qhp->attr.mpa_attr.recv_marker_enabled |
        (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
        (qhp->attr.mpa_attr.crc_enabled << 2);
if (!qhp->ibqp.uobject)
init_attr.tcp_emss = qhp->ep->emss;
init_attr.ord = qhp->attr.max_ord;
init_attr.ird = qhp->attr.max_ird;
init_attr.qp_dma_addr = qhp->wq.dma_addr;
init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
init_attr.chan = qhp->ep->l2t->smt_idx;
if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
init_attr.rtr_type = 0;
init_attr.irs = qhp->ep->rcv_seq;
PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
     "flags 0x%x qpcaps 0x%x\n", __func__,
     init_attr.rq_addr, init_attr.rq_size,
     init_attr.flags, init_attr.qpcaps);
PDBG("%s ret %d\n", __func__, ret);
PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
rhp->attr.max_rdma_read_qp_depth) {
rhp->attr.max_rdma_reads_per_qp) {
if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
switch (qhp->attr.state) {
qhp->ep = qhp->attr.llp_stream_handle;
spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_init(rhp, qhp, mask, attrs);
if (qhp->ibqp.uobject)
        cxio_set_wq_in_error(&qhp->wq);
qhp->attr.llp_stream_handle = NULL;
__func__, qhp->attr.state);
PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
qhp->attr.llp_stream_handle = NULL;
spin_unlock_irqrestore(&qhp->lock, flag);
PDBG("%s exit state %d\n", __func__, qhp->attr.state);