#include <linux/sched.h>
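/*
 * The fragments below appear to come from the InfiniPath (ipath)
 * verbs RUC path (ipath_ruc.c).  First, the receive-SGE setup loop:
 * zero-length entries in the posted SG list are skipped.
 */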
for (i = j = 0; i < wqe->num_sge; i++) {
	if (wqe->sg_list[i].length == 0)
		continue;
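/* Bad-LKEY error path: build a zeroed work completion to report it. */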
memset(&wc, 0, sizeof(wc));
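/* ipath_get_rwqe(): receive WQEs may come from a shared receive queue. */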
srq = to_isrq(qp->ibqp.srq);
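/* The ring indices live in user-mapped memory, so validate and wrap them explicitly. */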
if (tail >= rq->size)
	tail = 0;
wqe = get_rwqe_ptr(rq, tail);
if (++tail >= rq->size)
	tail = 0;
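/* n counts the receive WQEs still queued; dropping below the armed limit disarms it and raises the SRQ limit event. */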
if (n < srq->limit) {
	struct ib_event ev;

	srq->limit = 0;
	spin_unlock_irqrestore(&rq->lock, flags);
spin_unlock_irqrestore(&rq->lock, flags);
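/*
 * ipath_ruc_loopback() - handle send work requests addressed to the
 * local HCA by copying data QP-to-QP in software, bypassing the wire.
 */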
static void ipath_ruc_loopback(struct ipath_qp *sqp)
wqe = get_swqe_ptr(sqp, sqp->s_last);
spin_unlock_irqrestore(&sqp->s_lock, flags);
sqp->s_sge.num_sge = wqe->wr.num_sge;
switch (wqe->wr.opcode) {
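/* Send/RDMA-write with immediate: latch the immediate data into the completion. */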
wc.ex.imm_data = wqe->wr.ex.imm_data;
wc.ex.imm_data = wqe->wr.ex.imm_data;
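/* Validate the requester's remote address and RKEY before the RDMA write (first pair) or RDMA read (second pair) proceeds. */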
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
qp->r_sge.num_sge = wqe->wr.num_sge;
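/* Atomics: compare_add is the compare value for CMP_AND_SWP or the addend for FETCH_AND_ADD. */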
wqe->wr.wr.atomic.remote_addr,
wqe->wr.wr.atomic.rkey,
sdata = wqe->wr.wr.atomic.compare_add;
sdata, wqe->wr.wr.atomic.swap);
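/*
 * Hedged sketch (not the driver's code; the function name is
 * hypothetical): how the two IB atomic opcodes can be emulated in a
 * software loopback on a 64-bit target word using the kernel's
 * atomic64 API.
 */
static u64 loopback_atomic(int fetch_add, atomic64_t *maddr,
			   u64 compare_add, u64 swap)
{
	if (fetch_add)
		/* Fetch-and-add: return the value the word held before. */
		return (u64) atomic64_add_return(compare_add, maddr) -
			compare_add;
	/* Compare-and-swap: store swap only if the old value matches. */
	return (u64) atomic64_cmpxchg(maddr, compare_add, swap);
}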
sge = &sqp->s_sge.sge;
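/* Copy loop bookkeeping: when an SGE is consumed, take the next SG-list entry or step through the MR's segment map. */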
if (--sqp->s_sge.num_sge)
	*sge = *sqp->s_sge.sg_list++;
if (++sge->m >= sge->mr->mapsz)
	break;
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
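/* Error completion path: if the flushed WQE was the last one, the last-WQE-reached event is delivered to the consumer. */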
spin_unlock_irqrestore(&sqp->s_lock, flags);
ev.element.qp = &sqp->ibqp;
sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
spin_unlock_irqrestore(&sqp->s_lock, flags);
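/* The scratch-register read-back apparently forces the preceding control-register write out to the chip. */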
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
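/* ipath_no_bufs_available(): no PIO send buffer was free; park the QP until the buffer-available interrupt restarts it. */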
static int ipath_no_bufs_available(struct ipath_qp *qp,
				   struct ipath_ibdev *dev)
spin_unlock_irqrestore(&qp->s_lock, flags);
want_buffer(dev->dd, qp);
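/* GRH length, in units of 32-bit words. */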
return sizeof(struct ib_grh) / sizeof(u32);
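/* extra_bytes pads the payload to a 4-byte boundary; the BTH pad count occupies bits 21:20 of word 0. */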
bth0 |= extra_bytes << 20;
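/* In the send engine, a destination LID matching the local port takes the software loopback path. */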
ipath_ruc_loopback(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
spin_unlock_irqrestore(&qp->s_lock, flags);
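/* Couldn't get a PIO buffer: queue the QP and bail out. */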
if (ipath_no_bufs_available(qp, dev))
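/* ipath_send_complete(): build the work completion, then retire the WQE by advancing s_last below. */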
memset(&wc, 0, sizeof wc);
old_last = last = qp->s_last;
if (++last >= qp->s_size)
	last = 0;
qp->s_last = last;
if (qp->s_cur == old_last)
	qp->s_cur = last;
if (qp->s_tail == old_last)
	qp->s_tail = last;