89 rkt = &to_idev(qp->ibqp.device)->lk_table;
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
94 for (i = j = 0; i < wqe->num_sge; i++) {
95 if (wqe->sg_list[i].length == 0)
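/* Lines 89-95 above: fetch the device lkey table and the PD (the SRQ's PD when one is attached), then walk the receive WQE's scatter/gather list, skipping zero-length entries; presumably the SGE setup in qib_init_sge(). */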
150 srq = to_isrq(qp->ibqp.srq);
168 if (tail >= rq->size)
176 wqe = get_rwqe_ptr(rq, tail);
182 if (++tail >= rq->size)
185 if (!wr_id_only && !qib_init_sge(qp, wqe)) {
207 if (n < srq->limit) {
211 spin_unlock_irqrestore(&rq->lock, flags);
220 spin_unlock_irqrestore(&rq->lock, flags);
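/* Lines 150-220 above: dequeue a receive WQE from the QP's receive queue or its SRQ. The tail index is validated and wrapped because the queue lives in user-writable memory, and dropping below srq->limit raises an SRQ limit event; this looks like qib_get_rwqe(). */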
241 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
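/* Line 241 above: hand an asynchronous event to the consumer's registered handler, likely the path-migration notification delivered by qib_migrate_qp(). */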
251 return ibp->guids[index - 1];
256 return (gid->global.interface_id == id &&
257 (gid->global.subnet_prefix == gid_prefix ||
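/* Lines 251-257 above: helper logic that returns a source GUID from the port's guids[] table (offset by one, index 0 being the port GUID) and accepts a GID when its interface ID matches and its subnet prefix is either the port's prefix or the default prefix. */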
268 int has_grh, struct qib_qp *qp, u32 bth0)
280 guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
283 if (!gid_ok(&hdr->u.l.grh.sgid,
288 if (!qib_pkey_ok((u16)bth0,
294 hdr->lrh[3], hdr->lrh[1]);
303 spin_unlock_irqrestore(&qp->s_lock, flags);
311 guid = get_sguid(ibp,
315 if (!gid_ok(&hdr->u.l.grh.sgid,
320 if (!qib_pkey_ok((u16)bth0,
326 hdr->lrh[3], hdr->lrh[1]);
334 !(bth0 & IB_BTH_MIG_REQ))
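/* Lines 268-334 above: validate the incoming header against the QP's alternate and primary paths: GRH source GID, P_Key taken from the low 16 bits of BTH word 0, SLID, and the BTH MigReq bit that drives path-migration state. The parameter list on line 268 suggests qib_ruc_check_hdr(). */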
355 static void qib_ruc_loopback(struct qib_qp *sqp)
387 wqe = get_swqe_ptr(sqp, sqp->s_last);
407 spin_unlock_irqrestore(&sqp->s_lock, flags);
410 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
429 sqp->s_sge.num_sge = wqe->wr.num_sge;
431 switch (wqe->wr.opcode) {
434 wc.ex.imm_data = wqe->wr.ex.imm_data;
448 wc.ex.imm_data = wqe->wr.ex.imm_data;
461 wqe->wr.wr.rdma.remote_addr,
462 wqe->wr.wr.rdma.rkey,
466 qp->r_sge.num_sge = 1;
474 wqe->wr.wr.rdma.remote_addr,
475 wqe->wr.wr.rdma.rkey,
480 sqp->s_sge.num_sge = 1;
483 qp->r_sge.num_sge = wqe->wr.num_sge;
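/* Lines 429-483 above: per-opcode setup for the loopback copy. Sends target the receiver's posted WQE, RDMA writes rkey-check the responder's memory as the copy destination, and RDMA reads reverse the roles so the responder's registered memory is the source and the requester's sg_list the destination. */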
492 wqe->wr.wr.atomic.remote_addr,
493 wqe->wr.wr.atomic.rkey,
498 sdata = wqe->wr.wr.atomic.compare_add;
503 sdata, wqe->wr.wr.atomic.swap);
504 qib_put_mr(qp->r_sge.sge.mr);
505 qp->r_sge.num_sge = 0;
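/* Lines 492-505 above: loopback atomics. After an rkey check on the remote address, the WQE's compare_add/swap operands drive either a fetch-and-add or a compare-and-swap on the target buffer, and the target MR reference is dropped once the result is stored. */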
513 sge = &sqp->s_sge.sge;
529 if (--sqp->s_sge.num_sge)
530 *sge = *sqp->s_sge.sg_list++;
531 } else if (sge->length == 0 && sge->mr->lkey) {
533 if (++sge->m >= sge->mr->mapsz)
538 sge->mr->map[sge->m]->segs[sge->n].vaddr;
540 sge->mr->map[sge->m]->segs[sge->n].length;
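/* Lines 513-540 above: advance the sender's SGE as the copy proceeds; when an entry is exhausted, move to the next sg_list element or step through the MR's map[]/segs[] arrays for the next virtual address and length. */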
545 qib_put_ss(&qp->r_sge);
623 spin_unlock_irqrestore(&sqp->s_lock, flags);
628 ev.element.qp = &sqp->ibqp;
630 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
637 spin_unlock_irqrestore(&sqp->s_lock, flags);
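/* Lines 355-637 above: qib_ruc_loopback() (named on line 355) services send work requests whose destination QP sits on the same HCA, copying data directly between the two QPs' SGE states instead of sending packets on the wire; lines 628-630 report an asynchronous QP event on the error path. */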
671 return sizeof(struct ib_grh) / sizeof(u32);
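/* Line 671 above: struct ib_grh is 40 bytes, so this returns the GRH length in 32-bit words (10); presumably the tail of qib_make_grh(). */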
700 bth0 |= extra_bytes << 20;
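/* Line 700 above: in the BTH the pad count occupies bits 21:20 of the first 32-bit word, so the number of pad bytes is shifted into position while the header is built. */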
727 qib_ruc_loopback(qp);
741 if (!qib_send_ok(qp)) {
742 spin_unlock_irqrestore(&qp->s_lock, flags);
748 spin_unlock_irqrestore(&qp->s_lock, flags);
763 } while (make_req(qp));
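/* Lines 727-763 above: the send engine. Loopback-eligible requests are handed to qib_ruc_loopback(); otherwise, with s_lock protecting the busy check, packets are built and posted in a loop until make_req() (the per-QP-type packet builder) reports no more work. */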
778 for (i = 0; i < wqe->wr.num_sge; i++) {
794 memset(&wc, 0, sizeof wc);
812 if (qp->s_cur == old_last)
814 if (qp->s_tail == old_last)
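/* Lines 778-814 above: send completion. Release the MR references held by the WQE's SGEs, generate a work completion when one is required (signaled request, error status, or a QP that signals every request), then retire the WQE by advancing s_last and pulling s_cur and s_tail along if they pointed at it. */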