Linux Kernel 3.7.1
mthca_memfree.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
        MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

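/*
 * Illustrative note (not in the original source): with 4 KB pages,
 * MTHCA_ICM_ALLOC_SIZE = 1 << 18 = 256 KB means
 * get_order(MTHCA_ICM_ALLOC_SIZE) = 6, so mthca_alloc_icm() below
 * starts each chunk with a single order-6 (64-page) allocation and
 * halves the order whenever an allocation fails.
 */
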
struct mthca_user_db_table {
        struct mutex mutex;
        struct {
                u64                uvirt;
                struct scatterlist mem;
                int                refcount;
        }                page[0];
};

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i) {
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
        }
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
        struct mthca_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mthca_free_icm_coherent(dev, chunk);
                else
                        mthca_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        struct page *page;

        /*
         * Use __GFP_ZERO because buggy firmware assumes ICM pages are
         * cleared, and subtle failures are seen if they aren't.
         */
        page = alloc_pages(gfp_mask | __GFP_ZERO, order);
        if (!page)
                return -ENOMEM;

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                    int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
                                       gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                                  gfp_t gfp_mask, int coherent)
{
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return icm;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
                                                       &chunk->mem[chunk->npages],
                                                       cur_order, gfp_mask);
                else
                        ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                    cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;
                        else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;
                        }

                        if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
                                chunk = NULL;

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mthca_free_icm(dev, icm, coherent);
        return NULL;
}

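/*
 * Illustrative sketch (not part of the driver): how a caller might
 * pair mthca_alloc_icm() with mthca_free_icm().  The request is in
 * pages; chunking and DMA mapping happen inside the allocator.  The
 * function below is hypothetical, shown under #if 0.
 */
#if 0
static int example_icm_roundtrip(struct mthca_dev *dev)
{
        /* 256 pages = 1 MB of ICM with 4 KB pages; non-coherent. */
        struct mthca_icm *icm = mthca_alloc_icm(dev, 256,
                                                GFP_KERNEL | __GFP_NOWARN, 0);
        if (!icm)
                return -ENOMEM;

        /* ... map it into the device's ICM space with mthca_MAP_ICM() ... */

        mthca_free_icm(dev, icm, 0);
        return 0;
}
#endif
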
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                        (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                        __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mthca_MAP_ICM(dev, table->icm[i],
                          table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
                mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
                mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

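/*
 * Illustrative sketch (not part of the driver): mthca_table_get() and
 * mthca_table_put() form a reference-counted pair.  The chunk backing
 * "obj" is allocated and mapped on the first get and unmapped and
 * freed when the last reference is dropped.  Hypothetical caller:
 */
#if 0
static int example_table_ref(struct mthca_dev *dev,
                             struct mthca_icm_table *table, int obj)
{
        int err = mthca_table_get(dev, table, obj); /* maps chunk if needed */
        if (err)
                return err;

        /* ... the object at index "obj" is backed by ICM here ... */

        mthca_table_put(dev, table, obj);           /* may free the chunk */
        return 0;
}
#endif
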
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
        int idx, offset, dma_offset, i;
        struct mthca_icm_chunk *chunk;
        struct mthca_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}

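/*
 * Illustrative note (not in the original source): for a lowmem,
 * non-coherent table a caller can translate an object index into a
 * kernel virtual address and bus address in one call, e.g.
 *
 *      dma_addr_t dma;
 *      void *vaddr = mthca_table_find(table, obj, &dma);
 *
 * vaddr is NULL if the table lives in highmem or the backing chunk
 * has not been allocated yet.
 */
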
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                          int start, int end)
{
        int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mthca_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mthca_table_put(dev, table, i);
        }

        return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                           int start, int end)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
                mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                              u64 virt, int obj_size,
                                              int nobj, int reserved,
                                              int use_lowmem, int use_coherent)
{
        struct mthca_icm_table *table;
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;

        obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
        num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

        table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
        if (!table)
                return NULL;

        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i < num_icm; ++i)
                table->icm[i] = NULL;

        for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MTHCA_TABLE_CHUNK_SIZE;
                if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

                table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                                (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                                __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mthca_MAP_ICM(dev, table->icm[i],
                                  virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return table;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table);

        return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev,
                                        table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
                        mthca_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table);
}

static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
        return dev->uar_table.uarc_base +
                uar->index * dev->uar_table.uarc_size +
                page * MTHCA_ICM_PAGE_SIZE;
}

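/*
 * Illustrative arithmetic (not in the original source), using made-up
 * values: with uarc_base = 0x1000000, uarc_size = 8192 and
 * MTHCA_ICM_PAGE_SIZE = 4096, page 1 of UAR index 2 maps to
 * 0x1000000 + 2 * 8192 + 1 * 4096 = 0x1005000 in ICM virtual space.
 */
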
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
        struct page *pages[1];
        int ret = 0;
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        if (index < 0 || index > dev->uar_table.uarc_size / 8)
                return -EINVAL;

        mutex_lock(&db_tab->mutex);

        i = index / MTHCA_DB_REC_PER_PAGE;

        if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
            (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
            (uaddr & 4095)) {
                ret = -EINVAL;
                goto out;
        }

        if (db_tab->page[i].refcount) {
                ++db_tab->page[i].refcount;
                goto out;
        }

        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
                             pages, NULL);
        if (ret < 0)
                goto out;

        sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
                    uaddr & ~PAGE_MASK);

        ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
        if (ret < 0) {
                put_page(pages[0]);
                goto out;
        }

        ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                                 mthca_uarc_virt(dev, uar, i));
        if (ret) {
                pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                put_page(sg_page(&db_tab->page[i].mem));
                goto out;
        }

        db_tab->page[i].uvirt    = uaddr;
        db_tab->page[i].refcount = 1;

out:
        mutex_unlock(&db_tab->mutex);
        return ret;
}

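/*
 * Illustrative note (not in the original source): mthca_map_user_db()
 * requires uaddr to be page aligned (the "uaddr & 4095" check) and
 * pins the page with get_user_pages() so the HCA can DMA doorbell
 * records from userspace memory.  Mapping another doorbell index that
 * falls in the same page only bumps the page's refcount; the page
 * stays pinned until mthca_cleanup_user_db_tab() runs.
 */
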
void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                         struct mthca_user_db_table *db_tab, int index)
{
        if (!mthca_is_memfree(dev))
                return;

        /*
         * To make our bookkeeping simpler, we don't unmap DB
         * pages until we clean up the whole db table.
         */

        mutex_lock(&db_tab->mutex);

        --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

        mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
        struct mthca_user_db_table *db_tab;
        int npages;
        int i;

        if (!mthca_is_memfree(dev))
                return NULL;

        npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
        if (!db_tab)
                return ERR_PTR(-ENOMEM);

        mutex_init(&db_tab->mutex);
        for (i = 0; i < npages; ++i) {
                db_tab->page[i].refcount = 0;
                db_tab->page[i].uvirt    = 0;
                sg_init_table(&db_tab->page[i].mem, 1);
        }

        return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                               struct mthca_user_db_table *db_tab)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
                if (db_tab->page[i].uvirt) {
                        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
                        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                        put_page(sg_page(&db_tab->page[i].mem));
                }
        }

        kfree(db_tab);
}

int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
                   u32 qn, __be32 **db)
{
        int group;
        int start, end, dir;
        int i, j;
        struct mthca_db_page *page;
        int ret = 0;

        mutex_lock(&dev->db_tab->mutex);

        switch (type) {
        case MTHCA_DB_TYPE_CQ_ARM:
        case MTHCA_DB_TYPE_SQ:
                group = 0;
                start = 0;
                end   = dev->db_tab->max_group1;
                dir   = 1;
                break;

        case MTHCA_DB_TYPE_CQ_SET_CI:
        case MTHCA_DB_TYPE_RQ:
        case MTHCA_DB_TYPE_SRQ:
                group = 1;
                start = dev->db_tab->npages - 1;
                end   = dev->db_tab->min_group2;
                dir   = -1;
                break;

        default:
                ret = -EINVAL;
                goto out;
        }

        for (i = start; i != end; i += dir)
                if (dev->db_tab->page[i].db_rec &&
                    !bitmap_full(dev->db_tab->page[i].used,
                                 MTHCA_DB_REC_PER_PAGE)) {
                        page = dev->db_tab->page + i;
                        goto found;
                }

        for (i = start; i != end; i += dir)
                if (!dev->db_tab->page[i].db_rec) {
                        page = dev->db_tab->page + i;
                        goto alloc;
                }

        if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
                ret = -ENOMEM;
                goto out;
        }

        if (group == 0)
                ++dev->db_tab->max_group1;
        else
                --dev->db_tab->min_group2;

        page = dev->db_tab->page + end;

alloc:
        page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                          &page->mapping, GFP_KERNEL);
        if (!page->db_rec) {
                ret = -ENOMEM;
                goto out;
        }
        memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

        ret = mthca_MAP_ICM_page(dev, page->mapping,
                                 mthca_uarc_virt(dev, &dev->driver_uar, i));
        if (ret) {
                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                goto out;
        }

        bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
        j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
        set_bit(j, page->used);

        if (group == 1)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;

        ret = i * MTHCA_DB_REC_PER_PAGE + j;

        page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

        *db = (__be32 *) &page->db_rec[j];

out:
        mutex_unlock(&dev->db_tab->mutex);

        return ret;
}

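/*
 * Illustrative sketch (not part of the driver): allocating a send
 * queue doorbell record and releasing it.  mthca_alloc_db() returns
 * the doorbell index (or a negative errno), and that index is what
 * mthca_free_db() takes back.  Hypothetical caller:
 */
#if 0
static int example_db_roundtrip(struct mthca_dev *dev, u32 qpn)
{
        __be32 *db;
        int db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, qpn, &db);
        if (db_index < 0)
                return db_index;

        /* ... publish producer counters through *db ... */

        mthca_free_db(dev, MTHCA_DB_TYPE_SQ, db_index);
        return 0;
}
#endif
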
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
        int i, j;
        struct mthca_db_page *page;

        i = db_index / MTHCA_DB_REC_PER_PAGE;
        j = db_index % MTHCA_DB_REC_PER_PAGE;

        page = dev->db_tab->page + i;

        mutex_lock(&dev->db_tab->mutex);

        page->db_rec[j] = 0;
        if (i >= dev->db_tab->min_group2)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;
        clear_bit(j, page->used);

        if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
            i >= dev->db_tab->max_group1 - 1) {
                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                page->db_rec = NULL;

                if (i == dev->db_tab->max_group1) {
                        --dev->db_tab->max_group1;
                        /* XXX may be able to unmap more pages now */
                }
                if (i == dev->db_tab->min_group2)
                        ++dev->db_tab->min_group2;
        }

        mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
        if (!dev->db_tab)
                return -ENOMEM;

        mutex_init(&dev->db_tab->mutex);

        dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        dev->db_tab->max_group1 = 0;
        dev->db_tab->min_group2 = dev->db_tab->npages - 1;

        dev->db_tab->page = kmalloc(dev->db_tab->npages *
                                    sizeof *dev->db_tab->page,
                                    GFP_KERNEL);
        if (!dev->db_tab->page) {
                kfree(dev->db_tab);
                return -ENOMEM;
        }

        for (i = 0; i < dev->db_tab->npages; ++i)
                dev->db_tab->page[i].db_rec = NULL;

        return 0;
}

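/*
 * Illustrative note (not in the original source): the doorbell page
 * array is carved from both ends.  Group 1 (CQ arm and SQ records)
 * grows up from page 0, and group 2 (CQ set_ci, RQ and SRQ records)
 * grows down from the last page, which is why max_group1 starts at 0
 * and min_group2 at npages - 1 above, and why mthca_alloc_db() scans
 * the two groups in opposite directions.
 */
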
void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        /*
         * Because we don't always free our UARC pages when they
         * become empty to make mthca_free_db() simpler we need to
         * make a sweep through the doorbell pages and free any
         * leftover pages now.
         */
        for (i = 0; i < dev->db_tab->npages; ++i) {
                if (!dev->db_tab->page[i].db_rec)
                        continue;

                if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
                        mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);

                dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  dev->db_tab->page[i].db_rec,
                                  dev->db_tab->page[i].mapping);
        }

        kfree(dev->db_tab->page);
        kfree(dev->db_tab);
}