ipath_mr.c (Linux kernel 3.7.1)
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;	/* must be last */
};

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct ipath_fmr, ibfmr);
}

/**
 * ipath_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ipath_dma_mapping_ops functions (see ipath_dma.c).
 */
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ipath_mr *mr;
	struct ib_mr *ret;

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;

bail:
	return ret;
}
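
/*
 * Usage sketch (not from the original file; assumes the 3.7-era core
 * verbs API): kernel ULPs reach ipath_get_dma_mr() through
 * ib_get_dma_mr() rather than calling the driver directly:
 *
 *	struct ib_mr *dma_mr;
 *
 *	dma_mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(dma_mr))
 *		return PTR_ERR(dma_mr);
 *
 * The returned lkey can then be placed in SGEs that carry DMA addresses.
 */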

static struct ipath_mr *alloc_mr(int count,
				 struct ipath_lkey_table *lk_table)
{
	struct ipath_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!ipath_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;

	goto done;

bail:
	while (i) {
		i--;
		kfree(mr->mr.map[i]);
	}
	kfree(mr);
	mr = NULL;

done:
	return mr;
}
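
/*
 * Worked sizing example (assuming IPATH_SEGSZ, from ipath_verbs.h, is
 * the number of struct ipath_seg entries per first-level map page):
 * for count = 2 * IPATH_SEGSZ + 1,
 *
 *	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ = 3
 *
 * so the flexible map[] tail holds three pointers and three first-level
 * tables are allocated, the last one only partially filled.
 */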

/**
 * ipath_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start)
{
	struct ipath_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->mr.max_segs = num_phys_buf;
	mr->umem = NULL;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}
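
/*
 * Registration sketch (hypothetical addresses; ib_reg_phys_mr() is the
 * 3.7-era core verbs wrapper that dispatches here):
 *
 *	struct ib_phys_buf bufs[2] = {
 *		{ .addr = dma_addr0, .size = PAGE_SIZE },
 *		{ .addr = dma_addr1, .size = PAGE_SIZE },
 *	};
 *	u64 iova = dma_addr0;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, bufs, 2, IB_ACCESS_LOCAL_WRITE, &iova);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */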

/**
 * ipath_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the InfiniPath driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata)
{
	struct ipath_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->mr.max_segs = n;
	mr->umem = umem;

	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail_unwind;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == IPATH_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;
	goto bail;

bail_unwind:
	/* Don't leak the lkey, map tables, or pinned pages on error. */
	ipath_free_lkey(&to_idev(pd->device)->lk_table, mr->ibmr.lkey);
	for (i = 0; i < mr->mr.mapsz; i++)
		kfree(mr->mr.map[i]);
	kfree(mr);
	ib_umem_release(umem);

bail:
	return ret;
}
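
/*
 * This is the kernel half of ordinary userspace registration: a process
 * calls libibverbs (buf and len are hypothetical), ib_uverbs pins the
 * pages via ib_umem_get() above, and the lkey/rkey come back from
 * ipath_alloc_lkey():
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 */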

/**
 * ipath_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by ipath_get_dma_mr()
 * or ipath_reg_user_mr().
 */
int ipath_dereg_mr(struct ib_mr *ibmr)
{
	struct ipath_mr *mr = to_imr(ibmr);
	int i;

	ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
	i = mr->mr.mapsz;
	while (i) {
		i--;
		kfree(mr->mr.map[i]);
	}

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);
	return 0;
}
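
/*
 * Teardown counterpart to the sketches above: kernel consumers call
 * ib_dereg_mr(mr) (userspace: ibv_dereg_mr()), which lands here and
 * releases the lkey, the map tables, and any pinned umem.
 */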

/**
 * ipath_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr)
{
	struct ipath_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
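
/*
 * Allocation sketch (assumes the 3.7-era FMR API; attribute values are
 * chosen purely for illustration):
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE |
 *			   IB_ACCESS_REMOTE_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 */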

/**
 * ipath_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	struct ipath_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
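
/*
 * Mapping sketch (pages[] holds hypothetical page-aligned DMA
 * addresses): after this call, the FMR's keys cover list_len pages of
 * (1 << page_shift) bytes starting at iova:
 *
 *	u64 pages[2] = { dma_addr0, dma_addr1 };
 *	int ret;
 *
 *	ret = ib_map_phys_fmr(fmr, pages, 2, iova);
 */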

/**
 * ipath_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int ipath_unmap_fmr(struct list_head *fmr_list)
{
	struct ipath_fmr *fmr;
	struct ipath_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
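
/*
 * Unmap sketch: ib_unmap_fmr() takes a list so that several FMRs can be
 * invalidated at once (the usual pattern in FMR pools):
 *
 *	LIST_HEAD(fmr_list);
 *
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ret = ib_unmap_fmr(&fmr_list);
 */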

/**
 * ipath_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	int i;

	ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}
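
/*
 * Final teardown sketch: once unmapped, an FMR is freed with
 *
 *	ret = ib_dealloc_fmr(fmr);
 *
 * which drops the lkey and the first-level map tables allocated in
 * ipath_alloc_fmr().
 */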