#include <linux/errno.h>
#include <linux/slab.h>
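/*
 * Free path for a non-coherent chunk: every scatterlist entry holds a
 * high-order page allocation that must be released individually.
 */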
	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
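/*
 * Coherent variant: these entries came from dma_alloc_coherent(), so the
 * matching dma_free_coherent() needs both the kernel VA and the DMA address.
 */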
	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);
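	/* Record a freshly allocated high-order page in the chunk's scatterlist entry. */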
	sg_set_page(mem, page, PAGE_SIZE << order, 0);
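/*
 * Coherent allocator: the buffer comes from dma_alloc_coherent(), so it is
 * described with sg_set_buf() rather than sg_set_page().
 */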
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
	sg_set_buf(mem, buf, PAGE_SIZE << order);
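/*
 * Top-level ICM allocator: builds a list of chunks, each holding a fixed
 * number of scatterlist entries of high-order allocations.
 */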
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
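		/* Never allocate more than the number of pages still needed. */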
		while (1 << cur_order > npages)
			--cur_order;
		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);
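		/*
		 * Once a chunk's scatterlist fills up (non-coherent case), the
		 * whole list is DMA-mapped in a single pci_map_sg() call.
		 */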
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);
		npages -= 1 << cur_order;
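	/* Map the final, partially filled chunk once all pages are allocated. */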
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);
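/* Firmware command wrapper: unmap page_count ICM pages starting at virt. */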
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
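		/* Table-get path: the chunk is already mapped, so just take another reference. */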
		++table->icm[i]->refcount;
	if (!table->icm[i]) {
	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
	++table->icm[i]->refcount;
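	/* Table-put path: drop a reference; the last put unmaps and frees the chunk. */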
	if (--table->icm[i]->refcount == 0) {
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
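/*
 * Table-find path: walk the chunk list to translate an object's byte offset
 * into a lowmem kernel virtual address (and, optionally, a DMA address).
 */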
	struct page *page = NULL;
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
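			/*
			 * DMA mapping can merge pages but not split them, so
			 * by the time the matching entry is found below,
			 * *dma_handle has already been assigned.
			 */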
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
			offset -= chunk->mem[i].length;
	return page ? lowmem_page_address(page) + offset : NULL;
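/*
 * Range helper: take a reference on every chunk covering [start, end],
 * stepping by one chunk's worth of objects per iteration.
 */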
	for (i = start; i <= end; i += inc) {
			int use_lowmem, int use_coherent)
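	/*
	 * Ceiling division: the number of ICM chunks needed to hold nobj
	 * objects, equivalent to DIV_ROUND_UP(nobj, obj_per_chunk).
	 */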
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
	table->lowmem = use_lowmem;
	size = (u64) nobj * obj_size;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
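		/*
		 * Pin this chunk with an extra reference so it is never freed:
		 * it backs the reserved, firmware-owned objects.
		 */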
		++table->icm[i]->refcount;
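	/* Error unwind: release everything that was set up before the failure. */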
	for (i = 0; i < num_icm; ++i)
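	/* Table teardown: unmap and free every chunk still held by the table. */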
	for (i = 0; i < table->num_icm; ++i)