mthca_cq.c (Linux Kernel 3.7.1)
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/hardirq.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

enum {
	MTHCA_ATOMIC_BYTE_LEN = 8
};

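/*
 * A CQ buffer is an array of MTHCA_CQ_ENTRY_SIZE-byte entries.  Small
 * buffers (up to MTHCA_MAX_DIRECT_CQ_SIZE) live in one physically
 * contiguous ("direct") allocation; larger ones are kept as a list of
 * pages, which is why get_cqe_from_buf() below has two lookup paths.
 */
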
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 << 8)
#define MTHCA_CQ_STATE_ARMED        ( 1 << 8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 << 8)
#define MTHCA_EQ_STATE_FIRED        (10 << 8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	u8     sl_ipok;
	u8     g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};

struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     vendor_err;
	__be16 db_cnt;
	u32    reserved2;
	__be32 wqe;
	u8     opcode;
	u8     reserved3[2];
	u8     owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

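/*
 * The CQ doorbell commands above are encoded in the top byte of the
 * first doorbell word, with the CQN in the low 24 bits (see
 * update_cons_index() and the two arm functions below).  Tavor HCAs
 * take all CQ doorbells through MMIO via mthca_write64(); mem-free
 * (Arbel) HCAs also keep a consumer-index/arm doorbell record in host
 * memory, which is what cq->set_ci_db and cq->arm_db point at.
 */
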
static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
						 int entry)
{
	if (buf->is_direct)
		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	return get_cqe_from_buf(&cq->buf, entry);
}

static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

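/*
 * Ownership tracking: bit 7 of the CQE's last byte says who owns the
 * entry.  cqe_sw() returns the CQE only while software owns it, so
 * next_cqe_sw() doubles as the "is the CQ empty?" test, and
 * set_cqe_hw() hands a consumed entry back to the HCA.  The consumer
 * index is always masked with cq->ibcq.cqe (size - 1), so the buffer
 * is treated as a ring whose size must be a power of two.
 */
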
static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of CQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}
}

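/*
 * Note the "incr - 1" above: the Tavor INC_CI doorbell takes the
 * number of newly consumed entries minus one, so polling N
 * completions costs a single doorbell write rather than N of them.
 */
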
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
		    enum ib_event_type event_type)
{
	struct mthca_cq *cq;
	struct ib_event event;

	spin_lock(&dev->cq_table.lock);

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		++cq->refcount;

	spin_unlock(&dev->cq_table.lock);

	if (!cq) {
		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.cq = &cq->ibcq;
	if (cq->ibcq.event_handler)
		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

	spin_lock(&dev->cq_table.lock);
	if (!--cq->refcount)
		wake_up(&cq->wait);
	spin_unlock(&dev->cq_table.lock);
}

static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
	    MTHCA_ERROR_CQE_OPCODE_MASK)
		return !(cqe->opcode & 0x01);
	else
		return !(cqe->is_send & 0x80);
}

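/*
 * Error CQEs reuse the opcode byte: (opcode & MTHCA_ERROR_CQE_OPCODE_MASK)
 * == MTHCA_ERROR_CQE_OPCODE_MASK marks an error completion, and the low
 * opcode bit then distinguishes send (1) from receive (0), since the
 * normal is_send byte is not valid in that case.  mthca_poll_one()
 * below applies the same test.
 */
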
void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
		    struct mthca_srq *srq)
{
	struct mthca_cqe *cqe;
	u32 prod_index;
	int i, nfreed = 0;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cq->cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn)) {
			if (srq && is_recv_cqe(cqe))
				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
			++nfreed;
		} else if (nfreed)
			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
			       cqe, MTHCA_CQ_ENTRY_SIZE);
	}

	if (nfreed) {
		for (i = 0; i < nfreed; ++i)
			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
}

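/*
 * For example, if the unpolled entries are [A, Q, B, Q, C] (oldest
 * first) and QPN Q is being cleaned out, the backwards sweep above
 * copies B over the second Q and A over the first, leaving the two
 * dead slots at the head of the ring; those are handed back to
 * hardware with set_cqe_hw() and the consumer index skips past them,
 * so the next poll sees [A, B, C].
 */
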
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
	int i;

	/*
	 * In Tavor mode, the hardware keeps the consumer and producer
	 * indices mod the CQ size.  Since we might be making the CQ
	 * bigger, we need to deal with the case where the producer
	 * index wrapped around before the CQ was resized.
	 */
	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
	    cq->ibcq.cqe < cq->resize_buf->cqe) {
		cq->cons_index &= cq->ibcq.cqe;
		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
			cq->cons_index -= cq->ibcq.cqe + 1;
	}

	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					i & cq->resize_buf->cqe),
		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}

int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
	int ret;
	int i;

	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
			      MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf->queue, &buf->is_direct,
			      &dev->driver_pd, 1, &buf->mr);
	if (ret)
		return ret;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe_from_buf(buf, i));

	return 0;
}

void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
		       buf->is_direct, &buf->mr);
}

static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			     struct mthca_qp *qp, int wqe_index, int is_send,
			     struct mthca_err_cqe *cqe,
			     struct ib_wc *entry, int *free_cqe)
{
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status, vendor error
	 * (and freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	entry->vendor_err = cqe->vendor_err;

	/*
	 * Mem-free HCAs always generate one CQE per WQE, even in the
	 * error case, so we don't have to check the doorbell count, etc.
	 */
	if (mthca_is_memfree(dev))
		return;

	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return;

	be16_add_cpu(&cqe->db_cnt, -dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;
}

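/*
 * On Tavor, a single error CQE can stand in for several doorbell-
 * counted work requests, so instead of freeing the CQE outright the
 * code above may rewrite it in place (new WQE pointer, decremented
 * db_cnt, SYNDROME_WR_FLUSH_ERR) and clear *free_cqe, so that the
 * next poll consumes the same entry again as a flush error.
 */
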
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;
	u16 checksum;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		s32 wqe;
		wq = &(*cur_qp)->rq;
		wqe = be32_to_cpu(cqe->wqe);
		wqe_index = wqe >> wq->wqe_shift;
		/*
		 * WQE addr == base - 1 might be reported in receive completion
		 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
		 * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
		 */
		if (unlikely(wqe_index < 0))
			wqe_index = wq->max - 1;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq) {
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				 (struct mthca_err_cqe *) cqe,
				 entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid           = be16_to_cpu(cqe->rlid);
		entry->sl             = cqe->sl_ipok >> 4;
		entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
		entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags      |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
			   ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
		entry->wc_flags      |= (cqe->sl_ipok & 1 && checksum == 0xffff) ?
					IB_WC_IP_CSUM_OK : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	npolled = 0;
repoll:
	while (npolled < num_entries) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
		++npolled;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	/*
	 * If a CQ resize is in progress and we discovered that the
	 * old buffer is empty, then peek in the new buffer, and if
	 * it's not empty, switch to the new buffer and continue
	 * polling there.
	 */
	if (unlikely(err == -EAGAIN && cq->resize_buf &&
		     cq->resize_buf->state == CQ_RESIZE_READY)) {
		/*
		 * In Tavor mode, the hardware keeps the producer
		 * index modulo the CQ size.  Since we might be making
		 * the CQ bigger, we need to mask our consumer index
		 * using the size of the old CQ buffer before looking
		 * in the new CQ buffer.
		 */
		if (!mthca_is_memfree(dev))
			cq->cons_index &= cq->ibcq.cqe;

		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
					    cq->cons_index & cq->resize_buf->cqe))) {
			struct mthca_cq_buf tbuf;
			int tcqe;

			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			cq->resize_buf->buf   = tbuf;
			cq->resize_buf->cqe   = tcqe;
			cq->resize_buf->state = CQ_RESIZE_SWAPPED;

			goto repoll;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}

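/*
 * mthca_poll_cq() is what a consumer's ib_poll_cq() call lands on via
 * the ib_device method table.  A typical kernel ULP drains completions
 * in a loop along these lines (illustrative sketch only; the "wc"
 * array sizing and the process_wc() handler are the caller's choice,
 * not part of this driver):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			process_wc(&wc[i]);
 *
 * A return of 0 means the CQ is empty (the -EAGAIN from
 * mthca_poll_one() is absorbed above); a negative value is a real error.
 */
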
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
{
	u32 dbhi = ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
		    MTHCA_TAVOR_CQ_DB_REQ_NOT) |
		to_mcq(cq)->cqn;

	mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 db_rec[2];
	u32 dbhi;
	u32 sn = cq->arm_sn & 3;

	db_rec[0] = cpu_to_be32(cq->cons_index);
	db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				((flags & IB_CQ_SOLICITED_MASK) ==
				 IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(db_rec, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	dbhi = (sn << 28) |
		((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		 MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
		 MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn;

	mthca_write64(dbhi, cq->cons_index,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}

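/*
 * Consumers re-arm the CQ through ib_req_notify_cq(), e.g.
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *
 * (illustrative call, not driver code), which dispatches to
 * mthca_tavor_arm_cq() or mthca_arbel_arm_cq() depending on the HCA
 * generation.  On Arbel, the arm sequence number (sn) taken from
 * cq->arm_sn lets the hardware tell a fresh arm request from one
 * issued before the last completion event: mthca_cq_completion()
 * bumps arm_sn on every event.
 */
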
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
		if (err)
			goto err_out_mailbox;
	}

	spin_lock_init(&cq->lock);
	cq->refcount = 1;
	init_waitqueue_head(&cq->wait);
	mutex_init(&cq->mutex);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

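/*
 * Note that nent must be a power of two: the CQ context stores only
 * log2(nent) (the "ffs(nent) - 1" above), and cq->ibcq.cqe = nent - 1
 * is used as a ring mask throughout this file.  Callers are expected
 * to round the requested size up accordingly.
 */
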
static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int c;

	spin_lock_irq(&dev->cq_table.lock);
	c = cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	return c;
}

void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}