Linux Kernel 3.7.1
dmapool.c
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <[email protected]>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
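
/*
 * Typical use of this interface, shown only as a sketch: the device
 * pointer, block size, and names below are illustrative, not taken from
 * any particular driver.
 *
 *        struct dma_pool *pool;
 *        dma_addr_t dma;
 *        void *vaddr;
 *
 *        pool = dma_pool_create("descriptors", dev, 64, 8, 0);
 *        if (!pool)
 *                return -ENOMEM;
 *
 *        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *        if (vaddr) {
 *                ... hand 'dma' to the device, access the block via 'vaddr' ...
 *                dma_pool_free(pool, vaddr, dma);
 *        }
 *        dma_pool_destroy(pool);
 */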

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {               /* the pool */
        struct list_head page_list;
        spinlock_t lock;
        size_t size;
        struct device *dev;
        size_t allocation;
        size_t boundary;
        char name[32];
        struct list_head pools;
};

struct dma_page {               /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
        unsigned int in_use;
        unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
        unsigned temp;
        unsigned size;
        char *next;
        struct dma_page *page;
        struct dma_pool *pool;

        next = buf;
        size = PAGE_SIZE;

        temp = scnprintf(next, size, "poolinfo - 0.1\n");
        size -= temp;
        next += temp;

        mutex_lock(&pools_lock);
        list_for_each_entry(pool, &dev->dma_pools, pools) {
                unsigned pages = 0;
                unsigned blocks = 0;

                spin_lock_irq(&pool->lock);
                list_for_each_entry(page, &pool->page_list, page_list) {
                        pages++;
                        blocks += page->in_use;
                }
                spin_unlock_irq(&pool->lock);

                /* per-pool info, no real statistics yet */
                temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
                                 pool->name, blocks,
                                 pages * (pool->allocation / pool->size),
                                 pool->size, pages);
                size -= temp;
                next += temp;
        }
        mutex_unlock(&pools_lock);

        return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
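
/*
 * Reading the resulting "pools" sysfs attribute gives one line per pool
 * in the format built above: name, blocks in use, total blocks, block
 * size, and pages.  A hypothetical example (the values depend entirely
 * on the device and its driver):
 *
 *        poolinfo - 0.1
 *        buffer-2048         5  128 2048 64
 *        buffer-128          0   32  128  1
 */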

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * NULL if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not
 * crossing boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{
        struct dma_pool *retval;
        size_t allocation;

        if (align == 0) {
                align = 1;
        } else if (align & (align - 1)) {
                return NULL;
        }

        if (size == 0) {
                return NULL;
        } else if (size < 4) {
                size = 4;
        }

        if ((size % align) != 0)
                size = ALIGN(size, align);

        allocation = max_t(size_t, size, PAGE_SIZE);

        if (!boundary) {
                boundary = allocation;
        } else if ((boundary < size) || (boundary & (boundary - 1))) {
                return NULL;
        }

        retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
        if (!retval)
                return retval;

        strlcpy(retval->name, name, sizeof(retval->name));

        retval->dev = dev;

        INIT_LIST_HEAD(&retval->page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;

        if (dev) {
                int ret;

                mutex_lock(&pools_lock);
                if (list_empty(&dev->dma_pools))
                        ret = device_create_file(dev, &dev_attr_pools);
                else
                        ret = 0;
                /* note: not currently insisting "name" be unique */
                if (!ret)
                        list_add(&retval->pools, &dev->dma_pools);
                else {
                        kfree(retval);
                        retval = NULL;
                }
                mutex_unlock(&pools_lock);
        } else
                INIT_LIST_HEAD(&retval->pools);

        return retval;
}
EXPORT_SYMBOL(dma_pool_create);

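/*
 * A worked example of the parameter fix-ups above, with numbers chosen
 * only for illustration: dma_pool_create("foo", dev, 12, 16, 0) rounds
 * size up to ALIGN(12, 16) = 16, sets allocation = max(16, PAGE_SIZE),
 * and, since boundary was passed as 0, sets boundary = allocation.
 * An align or boundary that is not a power of two, or a boundary
 * smaller than size, makes the call return NULL instead.
 */
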
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
        unsigned int offset = 0;
        unsigned int next_boundary = pool->boundary;

        do {
                unsigned int next = offset + pool->size;
                if (unlikely((next + pool->size) >= next_boundary)) {
                        next = next_boundary;
                        next_boundary += pool->boundary;
                }
                *(int *)(page->vaddr + offset) = next;
                offset = next;
        } while (offset < pool->allocation);
}
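
/*
 * The loop above stores, in the first bytes of every free block, the
 * offset of the next free block, so the free list needs no memory of
 * its own.  A worked example with illustrative numbers: for size = 320,
 * boundary = 1024 and allocation = 4096, the chain built is
 * 0 -> 320 -> 640 -> 1024 -> 1344 -> ..., i.e. the block that would
 * start at offset 960 is skipped because it would run past the 1024
 * boundary, and the chain continues from the boundary itself.  An offset
 * of at least pool->allocation marks the end of the chain.
 */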

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
        struct dma_page *page;

        page = kmalloc(sizeof(*page), mem_flags);
        if (!page)
                return NULL;
        page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                         &page->dma, mem_flags);
        if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
                pool_initialise_page(pool, page);
                page->in_use = 0;
                page->offset = 0;
        } else {
                kfree(page);
                page = NULL;
        }
        return page;
}

static inline int is_page_busy(struct dma_page *page)
{
        return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
        dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
        list_del(&page->page_list);
        kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
        mutex_lock(&pools_lock);
        list_del(&pool->pools);
        if (pool->dev && list_empty(&pool->dev->dma_pools))
                device_remove_file(pool->dev, &dev_attr_pools);
        mutex_unlock(&pools_lock);

        while (!list_empty(&pool->page_list)) {
                struct dma_page *page;
                page = list_entry(pool->page_list.next,
                                  struct dma_page, page_list);
                if (is_page_busy(page)) {
                        if (pool->dev)
                                dev_err(pool->dev,
                                        "dma_pool_destroy %s, %p busy\n",
                                        pool->name, page->vaddr);
                        else
                                printk(KERN_ERR
                                       "dma_pool_destroy %s, %p busy\n",
                                       pool->name, page->vaddr);
                        /* leak the still-in-use consistent memory */
                        list_del(&page->page_list);
                        kfree(page);
                } else
                        pool_free_page(pool, page);
        }

        kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a block can't be allocated, NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle)
{
        unsigned long flags;
        struct dma_page *page;
        size_t offset;
        void *retval;

        might_sleep_if(mem_flags & __GFP_WAIT);

        spin_lock_irqsave(&pool->lock, flags);
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (page->offset < pool->allocation)
                        goto ready;
        }

        /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
        spin_unlock_irqrestore(&pool->lock, flags);

        page = pool_alloc_page(pool, mem_flags);
        if (!page)
                return NULL;

        spin_lock_irqsave(&pool->lock, flags);

        list_add(&page->page_list, &pool->page_list);
 ready:
        page->in_use++;
        offset = page->offset;
        page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
        spin_unlock_irqrestore(&pool->lock, flags);
        return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

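/*
 * Because of the might_sleep_if() check above, a caller that cannot
 * sleep (holding a spinlock, or running in interrupt context) must not
 * pass blocking GFP flags.  A sketch of that case, with illustrative
 * names:
 *
 *        dma_addr_t dma;
 *        void *buf;
 *
 *        buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *        if (!buf)
 *                return -ENOMEM;
 *
 * With GFP_ATOMIC the allocation may fail instead of sleeping, so the
 * failure path must be handled.
 */
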
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
        struct dma_page *page;

        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if (dma < (page->dma + pool->allocation))
                        return page;
        }
        return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_page *page;
        unsigned long flags;
        unsigned int offset;

        spin_lock_irqsave(&pool->lock, flags);
        page = pool_find_page(pool, dma);
        if (!page) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev,
                                "dma_pool_free %s, %p/%lx (bad dma)\n",
                                pool->name, vaddr, (unsigned long)dma);
                else
                        printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
                               pool->name, vaddr, (unsigned long)dma);
                return;
        }

        offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
        if ((dma - page->dma) != offset) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev,
                                "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                                pool->name, vaddr, (unsigned long long)dma);
                else
                        printk(KERN_ERR
                               "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                               pool->name, vaddr, (unsigned long long)dma);
                return;
        }
        {
                unsigned int chain = page->offset;
                while (chain < pool->allocation) {
                        if (chain != offset) {
                                chain = *(int *)(page->vaddr + chain);
                                continue;
                        }
                        spin_unlock_irqrestore(&pool->lock, flags);
                        if (pool->dev)
                                dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
                                        "already free\n", pool->name,
                                        (unsigned long long)dma);
                        else
                                printk(KERN_ERR "dma_pool_free %s, dma %Lx "
                                       "already free\n", pool->name,
                                       (unsigned long long)dma);
                        return;
                }
        }
        memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
        spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
        struct dma_pool *pool = *(struct dma_pool **)res;

        dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
        return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                                  size_t size, size_t align, size_t allocation)
{
        struct dma_pool **ptr, *pool;

        ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
        if (pool)
                devres_add(dev, ptr);
        else
                devres_free(ptr);

        return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

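/*
 * A probe()-time sketch of the managed variant; the driver structure and
 * names here are illustrative.  The pool is released by devres when the
 * driver detaches, so no explicit dma_pool_destroy() call is needed:
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                struct dma_pool *pool;
 *
 *                pool = dmam_pool_create("foo-desc", &pdev->dev, 64, 8, 0);
 *                if (!pool)
 *                        return -ENOMEM;
 *                ...
 *                return 0;
 *        }
 */
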
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
        struct device *dev = pool->dev;

        WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
        dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);