#include <linux/sched.h>
#include <linux/slab.h>
        for (i = 0; i < chunk->npages; ++i)
        for (i = 0; i < chunk->npages; ++i) {
                          lowmem_page_address(sg_page(&chunk->mem[i])),
                        mthca_free_icm_coherent(dev, chunk);
                        mthca_free_icm_pages(dev, chunk);
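        /*
         * Freeing mirrors allocation: chunks obtained from
         * dma_alloc_coherent() go through mthca_free_icm_coherent(),
         * while chunks built from alloc_pages() are DMA-unmapped and
         * released page by page in mthca_free_icm_pages().
         */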
        sg_set_page(mem, page, PAGE_SIZE << order, 0);
                                    int order, gfp_t gfp_mask)
        sg_set_buf(mem, buf, PAGE_SIZE << order);
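        /*
         * For a coherent chunk the CPU address of the DMA buffer is
         * recorded directly in the scatterlist entry, so these chunks
         * never need a separate pci_map_sg() pass.
         */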
                                  gfp_t gfp_mask, int coherent)
                while (1 << cur_order > npages)
                        ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
                                                       cur_order, gfp_mask);
                        ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                    cur_order, gfp_mask);
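                        /*
                         * Each scatterlist entry is filled at the current
                         * power-of-two order; on failure the caller is
                         * expected to retry at a smaller order (hence the
                         * cur_order bookkeeping around these calls).
                         */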
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                        npages -= 1 << cur_order;
        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
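        /*
         * Chunks are normally DMA-mapped as soon as they fill up inside
         * the allocation loop; this final pci_map_sg() catches a last,
         * partially filled chunk in the non-coherent case.
         */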
                ++table->icm[i]->refcount;
        if (!table->icm[i]) {
        ++table->icm[i]->refcount;
        if (!mthca_is_memfree(dev))
        if (--table->icm[i]->refcount == 0) {
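        /*
         * ICM table chunks are reference counted: mthca_table_get()
         * increments the count (allocating and mapping the chunk on
         * first use), and once the count drops to zero here the chunk
         * is unmapped from the HCA and freed.
         */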
        struct page *page = NULL;
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
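                        /*
                         * DMA mapping can merge pages but not split them,
                         * so once the right page is found below, *dma_handle
                         * has already been assigned by the check above.
                         */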
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                        offset -= chunk->mem[i].length;
        return page ? lowmem_page_address(page) + offset : NULL;
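        /*
         * The lookup walks the chunk list, subtracting each scatterlist
         * entry's length from the byte offset until the offset lands in
         * the right page; the table must be in lowmem for the returned
         * kernel virtual address to be valid.
         */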
        for (i = start; i <= end; i += inc) {
        if (!mthca_is_memfree(dev))
                                              int use_lowmem, int use_coherent)
        table->lowmem   = use_lowmem;
                          virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
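                /*
                 * Chunks mapped here hold objects reserved for firmware,
                 * so an extra reference is taken below to ensure they are
                 * never freed.
                 */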
                ++table->icm[i]->refcount;
        for (i = 0; i < table->num_icm; ++i)
        struct page *pages[1];
        if (!mthca_is_memfree(dev))
        if (index < 0 || index > dev->uar_table.uarc_size / 8)
            (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
        if (db_tab->page[i].refcount) {
                ++db_tab->page[i].refcount;
                                 mthca_uarc_virt(dev, uar, i));
        db_tab->page[i].refcount = 1;
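        /*
         * A user doorbell page is pinned and mapped into the UAR
         * context the first time an index on it is used; subsequent
         * mappings of the same page only take a reference (see the
         * refcount increment above).
         */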
        if (!mthca_is_memfree(dev))
        if (!mthca_is_memfree(dev))
        for (i = 0; i < npages; ++i) {
                db_tab->page[i].refcount = 0;
                db_tab->page[i].uvirt    = 0;
        if (!mthca_is_memfree(dev))
                if (db_tab->page[i].uvirt) {
                end   = dev->db_tab->max_group1;
                start = dev->db_tab->npages - 1;
                end   = dev->db_tab->min_group2;
        for (i = start; i != end; i += dir)
                if (dev->db_tab->page[i].db_rec &&
                    !bitmap_full(dev->db_tab->page[i].used,
        for (i = start; i != end; i += dir)
                if (!dev->db_tab->page[i].db_rec) {
        if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
                ++dev->db_tab->max_group1;
                --dev->db_tab->min_group2;
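        /*
         * Doorbell pages are handed out from both ends of the table:
         * group 1 grows up from page 0 and group 2 grows down from the
         * last page, so the bound check above fails the allocation when
         * the two groups would otherwise collide.
         */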
        if (i >= dev->db_tab->min_group2)
            i >= dev->db_tab->max_group1 - 1) {
                if (i == dev->db_tab->max_group1) {
                        --dev->db_tab->max_group1;
                if (i == dev->db_tab->min_group2)
                        ++dev->db_tab->min_group2;
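        /*
         * When the edge page of a group becomes empty it is freed and
         * the corresponding bound (max_group1 or min_group2) is pulled
         * back, making the page available to either group later.
         */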
        if (!mthca_is_memfree(dev))
        dev->db_tab->max_group1 = 0;
                                    sizeof *dev->db_tab->page,
        for (i = 0; i < dev->db_tab->npages; ++i)
        if (!mthca_is_memfree(dev))
        for (i = 0; i < dev->db_tab->npages; ++i) {
                if (!dev->db_tab->page[i].db_rec)
                        mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
                                  dev->db_tab->page[i].db_rec,
                                  dev->db_tab->page[i].mapping);