Linux Kernel 3.7.1
c2_cq.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/gfp.h>

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

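/* Size of one CQ message entry, rounded up to a 32-byte boundary. */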
#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))

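/*
 * Look up a CQ by MQ index in the device's qptr_array and take a
 * reference on it.  Returns NULL if the CQ has already been destroyed.
 */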
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
	struct c2_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);
	cq = c2dev->qptr_array[cqn];
	if (!cq) {
		spin_unlock_irqrestore(&c2dev->lock, flags);
		return NULL;
	}
	atomic_inc(&cq->refcount);
	spin_unlock_irqrestore(&c2dev->lock, flags);
	return cq;
}

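/*
 * Drop a reference taken by c2_cq_get(); the final put wakes anyone
 * waiting in c2_free_cq() for the CQ to become idle.
 */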
static void c2_cq_put(struct c2_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}

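/*
 * Handle a completion event for the MQ identified by mq_index: invoke
 * the consumer's completion handler, discarding events that race with
 * CQ destruction.
 */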
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_cq *cq;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq) {
		printk("discarding events on destroyed CQN=%d\n", mq_index);
		return;
	}

	(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
	c2_cq_put(cq);
}

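/*
 * Scrub the CQ ahead of destroying a QP: clear qp_user_context in any
 * unconsumed completion message that refers to the dying QP, so that
 * c2_poll_one() will skip those entries.
 */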
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
	struct c2_cq *cq;
	struct c2_mq *q;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	q = &cq->mq;
	if (q && !c2_mq_empty(q)) {
		u16 priv = q->priv;
		struct c2wr_ce *msg;

		while (priv != be16_to_cpu(*q->shared)) {
			msg = (struct c2wr_ce *)
				(q->msg_pool.host + priv * q->msg_size);
			if (msg->qp_user_context == (u64) (unsigned long) qp) {
				msg->qp_user_context = (u64) 0;
			}
			priv = (priv + 1) % q->q_size;
		}
	}
	spin_unlock_irq(&cq->lock);
	c2_cq_put(cq);
}

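/* Map an adapter CQE status code onto the OpenIB ib_wc_status space. */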
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
	switch (status) {
	case C2_OK:
		return IB_WC_SUCCESS;
	case CCERR_FLUSHED:
		return IB_WC_WR_FLUSH_ERR;
	case CCERR_BASE_AND_BOUNDS_VIOLATION:
		return IB_WC_LOC_PROT_ERR;
	case CCERR_ACCESS_VIOLATION:
		return IB_WC_LOC_ACCESS_ERR;
	case CCERR_TOTAL_LENGTH_TOO_BIG:
		return IB_WC_LOC_LEN_ERR;
	case CCERR_INVALID_WINDOW:
		return IB_WC_MW_BIND_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

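/*
 * Pull one completion message off the CQ's message queue, translate it
 * into the ib_wc entry supplied by the caller, and retire the
 * corresponding send or receive WQE(s).  Returns -EAGAIN when the
 * queue is empty.
 */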
static inline int c2_poll_one(struct c2_dev *c2dev,
			      struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;
	struct c2_qp *qp;
	int is_recv = 0;

	ce = c2_mq_consume(&cq->mq);
	if (!ce) {
		return -EAGAIN;
	}

	/*
	 * if the qp returned is null then this qp has already
	 * been freed and we are unable to process the completion.
	 * try pulling the next message
	 */
	while ((qp =
		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
		c2_mq_free(&cq->mq);
		ce = c2_mq_consume(&cq->mq);
		if (!ce)
			return -EAGAIN;
	}

	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
	entry->wr_id = ce->hdr.context;
	entry->qp = &qp->ibqp;
	entry->wc_flags = 0;
	entry->slid = 0;
	entry->sl = 0;
	entry->src_qp = 0;
	entry->dlid_path_bits = 0;
	entry->pkey_index = 0;

	switch (c2_wr_get_id(ce)) {
	case C2_WR_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case C2_WR_TYPE_RDMA_WRITE:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case C2_WR_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case C2_WR_TYPE_BIND_MW:
		entry->opcode = IB_WC_BIND_MW;
		break;
	case C2_WR_TYPE_RECV:
		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
		entry->opcode = IB_WC_RECV;
		is_recv = 1;
		break;
	default:
		break;
	}

	/* consume the WQEs */
	if (is_recv)
		c2_mq_lconsume(&qp->rq_mq, 1);
	else
		c2_mq_lconsume(&qp->sq_mq,
			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

	/* free the message */
	c2_mq_free(&cq->mq);

	return 0;
}

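/*
 * Poll up to num_entries completions into the caller's ib_wc array.
 * This backs the ib_poll_cq() verb; a consumer typically drains the
 * CQ in a loop, e.g. (sketch, not part of this driver; handle_wc()
 * is a hypothetical helper):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */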
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {

		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

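/*
 * Arm the CQ so the adapter raises an event on the next (or next
 * solicited) completion; this backs the ib_req_notify_cq() verb.
 * Returns nonzero if IB_CQ_REPORT_MISSED_EVENTS was requested and
 * completions were already pending when the CQ was armed.
 */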
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;

	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

	/*
	 * Now read back shared->armed to make the PCI
	 * write synchronous.  This is necessary for
	 * correct cq notification semantics.
	 */
	readb(&shared->armed);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}

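/* Free the DMA-coherent message pool backing a CQ's message queue. */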
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}

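/*
 * Allocate a DMA-coherent message pool for the CQ and initialize its
 * MQ as a host-target reply queue.  The MQ index and peer pointer are
 * filled in later, from the adapter's CCWR_CQ_CREATE reply.
 */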
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
			   int msg_size)
{
	u8 *pool_start;

	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
					&mq->host_dma, GFP_KERNEL);
	if (!pool_start)
		return -ENOMEM;

	c2_mq_rep_init(mq,
		       0,		/* index (currently unknown) */
		       q_size,
		       msg_size,
		       pool_start,
		       NULL,		/* peer (currently unknown) */
		       C2_MQ_HOST_TARGET);

	dma_unmap_addr_set(mq, mapping, mq->host_dma);

	return 0;
}

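/*
 * Create a completion queue: allocate the shared pointer and host
 * message pool, then send a CCWR_CQ_CREATE request to the adapter over
 * the verbs queue and wait for its reply, which supplies the adapter
 * CQ handle, the MQ index, and the offset of the shared "peer" page
 * that gets ioremapped below.
 */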
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}

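/*
 * Destroy a completion queue: unpublish it from qptr_array, wait for
 * in-flight event handlers to drop their references, then send a
 * CCWR_CQ_DESTROY request to the adapter and release the host-side
 * buffers.
 */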
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	int err;
	struct c2_vq_req *vq_req;
	struct c2wr_cq_destroy_req wr;
	struct c2wr_cq_destroy_rep *reply;

	might_sleep();

	/* Clear CQ from the qptr array */
	spin_lock_irq(&c2dev->lock);
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);
	spin_unlock_irq(&c2dev->lock);

	wait_event(cq->wait, !atomic_read(&cq->refcount));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		goto bail0;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.cq_handle = cq->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (reply)
		vq_repbuf_free(c2dev, reply);
bail1:
	vq_req_free(c2dev, vq_req);
bail0:
	if (cq->is_kernel) {
		c2_free_cq_buf(c2dev, &cq->mq);
	}

	return;
}