#include <linux/slab.h>
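/*
 * Dispatch a hardware completion event to the consumer's completion
 * handler registered on the ib_cq.
 */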
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}
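/*
 * A CQE is software-owned when its ownership bit matches the parity of
 * the requested index's wrap bit; get_sw_cqe() returns NULL while the
 * entry still belongs to hardware.
 */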
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
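/*
 * Pin the user-space CQ buffer and build an MTT so the HCA can DMA
 * into it.
 */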
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		uar = &to_mucontext(context)->uar;
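	/*
	 * The doorbell record is two consecutive 32-bit words: the
	 * consumer-index doorbell followed by the arm doorbell.
	 */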
	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;
	mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
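/*
 * Resize keeps the old CQ buffer live while a staging buffer
 * (cq->resize_buf) is allocated in kernel or user memory.
 */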
	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
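/*
 * Count the CQEs between the consumer index and the first
 * hardware-owned entry; the CQ cannot shrink below this number.
 */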
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}
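/*
 * Copy the CQEs that are still outstanding from the old buffer into
 * resize_buf, fixing up each ownership bit for the new ring size.
 */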
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}
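/*
 * mlx4_ib_resize_cq(): validate and round up the requested depth,
 * stage a new buffer, and switch over after the firmware resize
 * command completes.
 */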
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}
	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
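/* Print the raw contents of an error CQE (eight 32-bit words). */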
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK);
		dump_cqe(cqe);
	}
	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof(struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
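/*
 * Poll one CQE: advance the consumer index, resolve the QP the
 * completion belongs to, and translate the hardware CQE into an ib_wc.
 */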
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;
		pr_warn("Completion for NOP opcode detected!\n");
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn));
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	wc->qp = &(*cur_qp)->ibqp;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
	if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
		if ((*cur_qp)->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER |
		     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
	}
	wc->src_qp = g_mlpath_rqpn & 0xffffff;
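/*
 * mlx4_ib_poll_cq(): drain up to num_entries completions while holding
 * the CQ lock, then ring the consumer-index doorbell once on the way
 * out.
 */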
	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}
	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);
	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
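/*
 * Request a completion notification: arm the CQ for the next solicited
 * or unsolicited completion via the arm doorbell in the UAR page.
 */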
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));
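/*
 * __mlx4_ib_cq_clean(): drop CQEs belonging to a destroyed QP. Find
 * the last software-owned entry, then compact the ring by sliding
 * surviving CQEs toward the producer and advancing cons_index past the
 * freed slots.
 */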
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}
	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make buffer updates visible before the new consumer index. */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);