Linux Kernel 3.7.1
mr.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

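/*
 * Translate IB verbs access flags into the mlx4 hardware MPT
 * permission bits.  Local read access is always granted.
 */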
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       MLX4_PERM_LOCAL_READ;
}

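/*
 * Allocate an MR covering the whole address space (start 0, length
 * ~0ull), used for local DMA access by kernel consumers.
 */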
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

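/*
 * Walk the scatterlist chunks of a pinned user memory region and
 * write the DMA addresses of its pages into the MTT, batching the
 * entries one page's worth (PAGE_SIZE / sizeof(u64)) at a time.
 */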
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	struct ib_umem_chunk *chunk;
	int i, j, k;
	int n;
	int len;
	int err = 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					umem->page_size * k;
				/*
				 * Be friendly to mlx4_write_mtt() and
				 * pass it chunks of appropriate size.
				 */
				if (i == PAGE_SIZE / sizeof (u64)) {
					err = mlx4_write_mtt(dev->dev, mtt, n,
							     i, pages);
					if (err)
						goto out;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

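/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), allocate a hardware MR sized for the region,
 * write its MTT entries and enable the MR.
 */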
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

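/*
 * Deregister an MR: free the hardware MR and, if the MR was backed
 * by pinned user memory, release the umem.
 */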
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

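/*
 * Allocate an MR for use with fast-register work requests.  No pages
 * are attached at this point; they are supplied later by a fast
 * register WQE through a page list.
 */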
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_page_list_len, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	mlx4_mr_free(dev->dev, &mr->mmr);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

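/*
 * Allocate a fast-register page list: a kernel-visible array for the
 * caller plus a DMA-coherent copy that the hardware reads.  The
 * mapped copy must be 64-byte aligned, hence the WARN_ON below.
 */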
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof (u64);

	if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

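/*
 * Free both the DMA-coherent copy and the kernel-visible array of a
 * fast-register page list.
 */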
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	int size = page_list->max_page_list_len * sizeof (u64);

	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

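/*
 * Allocate a fast memory region (FMR) with the page size, maximum
 * page count and maximum map count requested in fmr_attr.
 */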
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

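/*
 * Map a list of physical pages into an FMR at the given I/O virtual
 * address; the resulting lkey and rkey are written back into the
 * ib_fmr.
 */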
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

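/*
 * Unmap a list of FMRs.  All FMRs on the list must belong to the same
 * device; each is unmapped in software, then a single SYNC_TPT
 * firmware command flushes the MPT updates to the hardware.
 */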
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

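/*
 * Free an FMR.  The mlx4_ib_fmr wrapper is released only if the
 * firmware free succeeds.
 */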
int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}