/*
 * Size of one CQ message-queue entry: sizeof(struct c2wr_ce) rounded up
 * to the next multiple of 32 bytes (the classic (x + a-1) & ~(a-1)
 * power-of-two alignment idiom).
 */
44 #define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
54 spin_unlock_irqrestore(&c2dev->
lock, flags);
58 spin_unlock_irqrestore(&c2dev->
lock, flags);
62 static void c2_cq_put(
struct c2_cq *cq)
72 cq = c2_cq_get(c2dev, mq_index);
74 printk(
"discarding events on destroyed CQN=%d\n", mq_index);
78 (*cq->
ibcq.comp_handler) (&cq->
ibcq, cq->
ibcq.cq_context);
87 cq = c2_cq_get(c2dev, mq_index);
91 spin_lock_irq(&cq->
lock);
93 if (q && !c2_mq_empty(q)) {
103 priv = (priv + 1) % q->
q_size;
106 spin_unlock_irq(&cq->
lock);
131 static inline int c2_poll_one(
struct c2_dev *c2dev,
156 entry->
status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
166 switch (c2_wr_get_id(ce)) {
204 struct c2_cq *cq = to_c2cq(ibcq);
210 for (npolled = 0; npolled <
num_entries; ++npolled) {
212 err = c2_poll_one(c2dev, cq, entry + npolled);
217 spin_unlock_irqrestore(&cq->
lock, flags);
230 shared = cq->
mq.peer;
250 ret = !c2_mq_empty(&cq->
mq);
251 spin_unlock_irqrestore(&cq->
lock, flags);
257 static void c2_free_cq_buf(
struct c2_dev *c2dev,
struct c2_mq *
mq)
263 static int c2_alloc_cq_buf(
struct c2_dev *c2dev,
struct c2_mq *mq,
int q_size,
291 unsigned long peer_pa;
297 cq->
ibcq.cqe = entries - 1;
317 memset(&wr, 0,
sizeof(wr));
319 wr.
hdr.context = (
unsigned long) vq_req;
345 if ((err = c2_errno(reply)) != 0)
369 cq->
cqn = cq->
mq.index;
379 c2_free_cq_buf(c2dev, &cq->
mq);
396 spin_lock_irq(&c2dev->
lock);
399 spin_unlock_irq(&c2dev->
lock);
408 memset(&wr, 0,
sizeof(wr));
410 wr.
hdr.context = (
unsigned long) vq_req;
433 c2_free_cq_buf(c2dev, &cq->
mq);