#include <linux/slab.h>
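/* number of eHCA hardware pages (EHCA_PAGESIZE each) that fit into one kernel page */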
#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
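/*
 * Queue page iterator: returns the current queue page and advances the
 * internal offset by one page; returns NULL once the end of the queue
 * is reached.
 */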
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
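/*
 * Event queue entry iterator: returns the current entry and advances by
 * one entry, wrapping back to the start of the queue (and flipping the
 * toggle state) after the last entry.
 */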
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
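/* translate an absolute address into an offset within the queue */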
		if (addr >= page && addr < page + queue->pagesize) {
			*q_offset = addr - page + i * queue->pagesize;
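/*
 * Queue pages are carved out of kernel pages, so a kernel page must be
 * large enough to hold at least one eHCA page.
 */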
#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K)!
#endif
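/*
 * Allocate zeroed kernel pages and split each one into PAGES_PER_KPAGE
 * eHCA-sized queue pages until nr_of_pages pages are set up.
 */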
	while (f < nr_of_pages) {
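/*
 * Small queues share kernel pages: the protection domain keeps per-order
 * free and full page lists, and a bitmap in each page tracks which of its
 * (512 << order)-byte slots are in use.
 */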
static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
	if (!list_empty(&pd->free[order]))
		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
		list_add(&page->list, &pd->free[order]);
		list_move(&page->list, &pd->full[order]);
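	/* byte offset of the allocated slot within the shared kernel page */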
	queue->offset = bit << (order + 9);
	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
	if (page->fill == 0) {
		list_move_tail(&page->list, &pd->free[order]);
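/*
 * Queue constructor: allocates either a slot in a shared small-queue page
 * or nr_of_pages regular queue pages, falling through to the error exit if
 * the allocation fails.
 */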
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
		   const u32 nr_of_pages, const u32 pagesize,
		   const u32 qe_size, const u32 nr_of_sg,
		   int is_small)
211 "is greater than kernel page size", pagesize);
	if (is_small) {
		if (!alloc_small_queue_page(queue, pd))
			goto ipz_queue_ctor_exit0;
	} else
		if (!alloc_queue_pages(queue, nr_of_pages))
			goto ipz_queue_ctor_exit0;
ipz_queue_ctor_exit0:
	ehca_gen_err("Couldn't alloc pages queue=%p "
		     "nr_of_pages=%x", queue, nr_of_pages);
	free_small_queue_page(queue, pd);