#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>
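/* Return a pointer to WQE number n within the SRQ buffer. */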
static void *get_wqe(struct mthca_srq *srq, int n)
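/*
 * The free-list link for a WQE is kept in the imm field of its
 * mthca_next_seg: a receive WQE never touches imm, so the link stays
 * valid even after the previous WQE has completed.
 */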
static inline int *wqe_to_link(void *wqe)
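/* Build the Tavor-format SRQ context that is later passed to firmware. */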
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
	memset(context, 0, sizeof *context);
	if (pd->ibpd.uobject)
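/* Same as above, but for the Arbel (memfree) SRQ context layout. */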
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
	memset(context, 0, sizeof *context);
	logsize = ilog2(max);
	if (pd->ibpd.uobject)
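/*
 * mthca_alloc_srq_buf(): a userspace SRQ needs no kernel buffer; for a
 * kernel SRQ, allocate the WQE buffer and link every WQE into the free
 * list.
 */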
	if (pd->ibpd.uobject)
		return 0;
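	/*
	 * Link all WQEs into the free list and set the scatter-list L_Keys
	 * to the invalid-lkey sentry value.
	 */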
	for (i = 0; i < srq->max; ++i) {
		next = wqe = get_wqe(srq, i);
		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
		} else {
			*wqe_to_link(wqe) = -1;
		}
		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}
	srq->last = get_wqe(srq, srq->max - 1);
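/*
 * mthca_alloc_srq(): size the SRQ, allocate an SRQ number and (on memfree
 * HCAs) ICM and doorbell resources, build the buffer, then hand the
 * context to firmware with SW2HW_SRQ.
 */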
	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;
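	/*
	 * memfree HCAs need ICM mapped for the SRQ number and, for kernel
	 * SRQs, a doorbell record.
	 */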
	if (mthca_is_memfree(dev)) {
		if (!pd->ibpd.uobject) {
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;
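	/* Fill the mailbox with the HCA-specific context and pass it to firmware. */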
	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
		goto err_out_free_srq;
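/*
 * Error unwinding reverses each step: take the SRQ back from firmware,
 * free the buffer, then the doorbell and ICM.
 */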
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
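/*
 * mthca_free_srq(): return the SRQ to software ownership with HW2SW_SRQ,
 * wait for users to drop their references, then release the buffer,
 * doorbell and SRQ number.
 */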
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}
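/*
 * mthca_modify_srq(): only the SRQ limit can be changed, and it must stay
 * below the number of usable WQEs.
 */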
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
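/*
 * mthca_query_srq(): read the SRQ context back from firmware to report the
 * current limit watermark.
 */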
		return PTR_ERR(mailbox);
	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
	} else {
		tavor_ctx = mailbox->buf;
	}
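/*
 * mthca_srq_event(): forward an asynchronous SRQ event to the consumer's
 * event handler, if one is registered.
 */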
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
	if (!srq->ibsrq.event_handler)
	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
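/*
 * mthca_free_srq_wqe(): put a completed WQE back on the tail of the free
 * list.  Must be called with IRQs disabled.
 */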
	spin_lock(&srq->lock);
	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	spin_unlock(&srq->lock);
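/*
 * mthca_tavor_post_srq_recv(): take WQEs off the free list, build their
 * scatter lists, and ring the receive doorbell.
 */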
	for (nreq = 0; wr; wr = wr->next) {
		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);
		prev_wqe = srq->last;
			srq->last = prev_wqe;
			mthca_set_data_seg(wqe, wr->sg_list + i);
			mthca_set_data_seg_inval(wqe);
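		/*
		 * Descriptors must be written to memory (wmb()) before the
		 * doorbell write makes them visible to the HCA.
		 */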
		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
	spin_unlock_irqrestore(&srq->lock, flags);
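/*
 * mthca_arbel_post_srq_recv(): same free-list handling, but new WQEs are
 * made visible by updating the doorbell record instead of ringing a
 * doorbell register.
 */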
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);
		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			mthca_set_data_seg_inval(wqe);
	spin_unlock_irqrestore(&srq->lock, flags);
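/*
 * mthca_max_srq_sge(): memfree HCAs can use the firmware limit directly.
 * Tavor rounds SRQ descriptor sizes up to a power of two, so the usable
 * scatter/gather count is derived from the largest power-of-two descriptor
 * size rather than the raw firmware max_sg value.
 */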
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
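/*
 * mthca_init_srq_table(): SRQ numbers come from an allocator sized to
 * num_srqs, with the firmware-reserved SRQs excluded.
 */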
	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);