Linux Kernel  3.7.1
mem.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

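/*
 * Write 'len' bytes of 'data' into adapter memory at offset 'addr' (in
 * 32-byte units), using inline ULP_TX MEM_WRITE work requests of at most
 * C4IW_MAX_INLINE_SIZE bytes each.  Only the last work request asks for a
 * completion, which we wait for before returning.  A NULL 'data' pointer
 * zeroes the target region instead.
 */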
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
						    FW_WR_COMPL(1));
			req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx)
			return -ENOMEM;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

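/*
 * Write a page-based translation list (PBL) of 'pbl_size' 64-bit entries
 * into adapter memory at byte offset 'pbl_addr' within the PBL region.
 */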
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

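/*
 * Thin wrappers around write_tpt_entry(): dereg_mem() and
 * deallocate_window() clear an existing TPT entry, while allocate_window()
 * and allocate_stag() create fresh entries of type FW_RI_STAG_MW and
 * FW_RI_STAG_NSMR respectively.
 */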
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

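/*
 * Record a newly written STag in the MR: mark the entry valid, set
 * lkey/rkey, and insert the MR into the device MMID table keyed by
 * stag >> 8.
 */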
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

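/*
 * register_mem() and reregister_mem() write (or rewrite) the NSMR TPT
 * entry from the attributes cached in the MR and then complete the
 * registration; if that fails, the TPT entry is torn down again.
 */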
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}

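/* Reserve room for 'npages' 64-bit PBL entries from the PBL pool. */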
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

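/*
 * Validate a physical buffer list, pick the largest page shift the
 * buffers' alignment allows (at least PAGE_SHIFT), and build the array of
 * big-endian page addresses to be loaded into the PBL.
 */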
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
				((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}

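/*
 * Re-register a physical MR (the rereg_phys_mr verb): optionally rebuild
 * the page list and update the PD and access flags, then rewrite the TPT
 * entry.  The changes are staged in a local copy of the MR so the original
 * is only updated once the hardware write has succeeded.
 */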
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

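/*
 * Register a list of physical buffers (the reg_phys_mr verb): build the
 * page list, allocate and write a PBL, then write the TPT entry for the
 * new MR.
 */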
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

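/*
 * Allocate a DMA MR spanning all of memory (the get_dma_mr verb): no PBL
 * is needed, the TPT entry is written with a zero FBO and a length of ~0.
 */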
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

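/*
 * Register a userspace memory region (the reg_user_mr verb): pin the pages
 * with ib_umem_get(), stream their DMA addresses into the PBL one
 * page-sized batch at a time, then write the TPT entry.
 */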
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

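/*
 * Allocate a memory window: write an MW-type TPT entry for this PD and
 * insert the new handle into the MMID table.
 */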
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}

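/*
 * Allocate a fast-register MR: reserve a PBL of 'pbl_depth' entries and
 * write an NSMR TPT entry.  The PBL itself is populated later by
 * fast-register work requests posted on a QP (outside this file).
 */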
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

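/*
 * Allocate the page list used with fast-register work requests; the
 * ib_fast_reg_page_list entries live in a single DMA-coherent buffer
 * directly after the c4iw_fr_page_list header.
 */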
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
				  &dma_addr, GFP_KERNEL);
	if (!c4pl)
		return ERR_PTR(-ENOMEM);

	dma_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
			  c4pl, dma_unmap_addr(c4pl, mapping));
}

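/*
 * Deregister an MR: remove it from the MMID table, clear its TPT entry,
 * release any PBL pool space, and drop the umem pin if this was a user
 * registration.
 */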
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}