qib_srq.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qib_verbs.h"

/**
 * qib_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
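		/*
		 * The receive queue is a circular buffer: wq->head is the
		 * next slot the producer fills and wq->tail the next slot
		 * the consumer drains.  One slot is always left unused
		 * (rq.size is max_wr + 1), so head == tail means empty
		 * and head + 1 == tail (mod size) means full.
		 */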
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
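		/*
		 * The head update below publishes the new entry; the
		 * consumer side (the RWQE is dequeued in qib_get_rwqe())
		 * is expected to pair the barrier above with a matching
		 * read barrier before it dereferences the entry.
		 */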
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibpd->device);
	struct qib_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		ret = ERR_PTR(-ENOSYS);
		goto done;
	}

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of
	 * entries; vmalloc_user() additionally zeroes the memory and
	 * page-aligns it so the ring can later be mmap()ed into
	 * userspace.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct qib_rwqe);
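	/*
	 * Each ring entry is a struct qib_rwqe header followed in-line
	 * by max_sge struct ib_sge entries, so sz is the per-entry
	 * stride; get_rwqe_ptr() indexes the ring with the same
	 * arithmetic.
	 */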
	srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

		srq->ip =
			qib_create_mmap_info(dev, s, ibpd->uobject->context,
					     srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
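	/*
	 * srq->limit arms the SRQ limit event: when the number of posted
	 * receives drops below this value, the consumer side is expected
	 * to raise an IB_EVENT_SRQ_LIMIT_REACHED async event (a value of
	 * 0 leaves the event disarmed).
	 */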
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_qib_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}

/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct qib_rwq *owq;
		struct qib_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct qib_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
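		/*
		 * Allocate the replacement ring up front: vmalloc_user()
		 * can sleep, which is not allowed once rq.lock (a
		 * spinlock) is taken below.
		 */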
		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

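			/*
			 * Userspace passes in the address of a __u64 where
			 * the new mmap offset should be stored; point
			 * udata->outbuf at it and write a zero now so an
			 * unwritable address fails early.
			 */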
			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * Validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
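		/*
		 * Copy the outstanding WQEs from the old ring into the
		 * new one, compacting them to start at slot 0; n counts
		 * the copied entries and becomes the new head index.
		 */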
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct qib_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct qib_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct qib_mmap_info *ip = srq->ip;
			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct qib_rwq) + size * sz;

			qib_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See qib_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct qib_srq *srq = to_isrq(ibsrq);

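	/* rq.size includes the one permanently-unused slot, so the
	 * usable capacity reported back to the caller is size - 1. */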
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * qib_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int qib_destroy_srq(struct ib_srq *ibsrq)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
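	/*
	 * If the ring was exported to userspace via mmap, drop our
	 * reference and let qib_release_mmap_info() free it once the
	 * last user mapping is gone; otherwise free it directly.
	 */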
	if (srq->ip)
		kref_put(&srq->ip->ref, qib_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}