63 init_completion(&mr->comp);
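/* init_qib_mregion() (line 63 above) initializes the completion that is signalled when the region's last reference is dropped; deinit_qib_mregion() below is its teardown counterpart. */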
77 static void deinit_qib_mregion(struct qib_mregion *mr)
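/* qib_get_dma_mr(): DMA MRs are refused for userspace PDs (lines 102-103); the embedded qib_mregion is set up with a zero segment count (line 113). */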
102 if (to_ipd(pd)->user) {
103 ret = ERR_PTR(-EPERM);
113 rval = init_qib_mregion(&mr->mr, pd, 0);
126 mr->mr.access_flags = acc;
132 deinit_qib_mregion(&mr->mr);
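/* alloc_mr(): shared allocator for the MR paths below; the allocation is sized for m first-level map pointers (line 146) and the single lkey also serves as the rkey (lines 160-161). */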
138 static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
146 mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
150 rval = init_qib_mregion(&mr->mr, pd, count);
160 mr->ibmr.lkey = mr->mr.lkey;
161 mr->ibmr.rkey = mr->mr.lkey;
166 deinit_qib_mregion(&mr->mr);
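/* qib_reg_phys_mr(): each physical buffer is recorded in the two-level map[m]->segs[n] table, and the MR length accumulates the buffer sizes (lines 202-205). */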
184 int num_phys_buf, int acc, u64 *iova_start)
190 mr = alloc_mr(num_phys_buf, pd);
192 ret = (struct ib_mr *)mr;
196 mr->mr.user_base = *iova_start;
197 mr->mr.iova = *iova_start;
198 mr->mr.access_flags = acc;
202 for (i = 0; i < num_phys_buf; i++) {
203 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
204 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
205 mr->mr.length += buffer_list[i].size;
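/* qib_reg_user_mr(): on ib_umem_get() failure the ERR_PTR-encoded umem is returned as-is (line 247); otherwise every entry of every umem chunk is walked into the segment table (line 272). */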
230 u64 virt_addr, int mr_access_flags,
247 return (void *) umem;
253 mr = alloc_mr(n, pd);
255 ret = (struct ib_mr *)mr;
261 mr->mr.iova = virt_addr;
264 mr->mr.access_flags = mr_access_flags;
272 for (i = 0; i < chunk->nents; i++) {
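/* qib_dereg_mr(): recovers the driver's qib_mr from the ib_mr (line 306) and tears the mregion down once its references have drained (line 320). */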
306 struct qib_mr *mr = to_imr(ibmr);
320 deinit_qib_mregion(&mr->mr);
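/* qib_alloc_fast_reg_mr(): fast-register MRs reuse alloc_mr(), sized by max_page_list_len (line 338). */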
338 mr = alloc_mr(max_page_list_len, pd);
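/* qib_alloc_fast_reg_page_list(): the page list is a flat array of u64 page addresses, hence the size computation on line 348. */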
348 unsigned size = page_list_len * sizeof(u64);
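/* qib_alloc_fmr(): the FMR analogue of alloc_mr(); the allocation is sized from fmr_attr->max_pages (lines 393, 397) and one key again serves as both lkey and rkey (lines 408-409). */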
393 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
397 rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
408 fmr->ibfmr.rkey = fmr->mr.lkey;
409 fmr->ibfmr.lkey = fmr->mr.lkey;
414 fmr->mr.access_flags = mr_access_flags;
423 deinit_qib_mregion(&fmr->mr);
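/* qib_map_phys_fmr(): the page list must fit within max_segs (line 454); the mapping is rewritten into map[m]->segs[n] under the device's lkey-table lock with interrupts saved (lines 458, 474). */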
454 if (list_len > fmr->mr.max_segs) {
458 rkt = &to_idev(ibfmr->device)->lk_table;
460 fmr->mr.user_base = iova;
462 ps = 1 << fmr->mr.page_shift;
463 fmr->mr.length = list_len * ps;
467 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
468 fmr->mr.map[m]->segs[n].length = ps;
474 spin_unlock_irqrestore(&rkt->lock, flags);
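/* qib_unmap_fmr(): invalidates the mapping under the same lkey-table lock; clearing user_base (line 496) leaves the FMR unusable until it is mapped again. */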
494 rkt = &to_idev(fmr->ibfmr.device)->lk_table;
496 fmr->mr.user_base = 0;
499 spin_unlock_irqrestore(&rkt->lock, flags);
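/* qib_dealloc_fmr(): drops the final reference (line 517); if the references do not drain in time the reference is re-taken and deallocation fails (line 521), otherwise the mregion is torn down (line 525). */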
517 qib_put_mr(&fmr->mr);
521 qib_get_mr(&fmr->mr);
525 deinit_qib_mregion(&fmr->mr);