37 #include <linux/sched.h>
38 #include <linux/export.h>
41 #include <linux/slab.h>
/*
 * Number of scatterlist entries that fit in the trailing page_list[]
 * of a struct ib_umem_chunk when the whole chunk occupies one page:
 * (bytes left in the page after the chunk header) / (bytes per entry).
 *
 * Use sizeof of one element rather than null-pointer address
 * arithmetic: sizeof is unevaluated (so the 0-based access is safe)
 * and, unlike (void *) pointer subtraction, it is standard C.
 */
#define IB_UMEM_MAX_PAGE_CHUNK \
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
	 sizeof(((struct ib_umem_chunk *) 0)->page_list[0]))
58 for (i = 0; i < chunk->
nents; ++
i) {
82 struct page **page_list;
86 unsigned long lock_limit;
87 unsigned long cur_base;
98 return ERR_PTR(-
EPERM);
140 locked = npages +
current->mm->pinned_vm;
153 min_t(
unsigned long, npages,
155 1, !umem->
writable, page_list, vma_list);
176 for (i = 0; i < chunk->
nents; ++
i) {
178 !is_vm_hugetlb_page(vma_list[i + off]))
180 sg_set_page(&chunk->
page_list[i], page_list[i + off], PAGE_SIZE, 0);
183 chunk->
nmap = ib_dma_map_sg_attrs(context->
device,
188 if (chunk->
nmap <= 0) {
189 for (i = 0; i < chunk->
nents; ++
i)
207 __ib_umem_release(context->
device, umem, 0);
217 return ret < 0 ? ERR_PTR(ret) : umem;
226 umem->
mm->pinned_vm -= umem->
diff;
242 __ib_umem_release(umem->
context->device, umem, 1);
272 current->mm->pinned_vm -= diff;
290 for (i = 0; i < chunk->
nmap; ++
i)