25 #include <linux/device.h>
28 #include <linux/kernel.h>
29 #include <linux/list.h>
30 #include <linux/export.h>
32 #include <linux/poison.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/stat.h>
37 #include <linux/string.h>
38 #include <linux/types.h>
39 #include <linux/wait.h>
41 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
42 #define DMAPOOL_DEBUG 1
78 temp =
scnprintf(next, size,
"poolinfo - 0.1\n");
87 spin_lock_irq(&pool->
lock);
92 spin_unlock_irq(&pool->
lock);
95 temp =
scnprintf(next, size,
"%-16s %4u %4Zu %4Zu %2u\n",
138 }
else if (align & (align - 1)) {
144 }
else if (size < 4) {
148 if ((size % align) != 0)
149 size =
ALIGN(size, align);
155 }
else if ((boundary < size) || (boundary & (boundary - 1))) {
159 retval = kmalloc_node(
sizeof(*retval),
GFP_KERNEL, dev_to_node(dev));
190 INIT_LIST_HEAD(&retval->
pools);
199 unsigned int next_boundary = pool->
boundary;
202 unsigned int next = offset + pool->
size;
204 next = next_boundary;
216 page =
kmalloc(
sizeof(*page), mem_flags);
220 &page->
dma, mem_flags);
225 pool_initialise_page(pool, page);
235 static inline int is_page_busy(
struct dma_page *page)
264 if (pool->
dev && list_empty(&pool->
dev->dma_pools))
272 if (is_page_busy(page)) {
275 "dma_pool_destroy %s, %p busy\n",
279 "dma_pool_destroy %s, %p busy\n",
285 pool_free_page(pool, page);
319 spin_unlock_irqrestore(&pool->
lock, flags);
321 page = pool_alloc_page(pool, mem_flags);
332 retval = offset + page->
vaddr;
333 *handle = offset + page->
dma;
337 spin_unlock_irqrestore(&pool->
lock, flags);
371 page = pool_find_page(pool, dma);
373 spin_unlock_irqrestore(&pool->
lock, flags);
376 "dma_pool_free %s, %p/%lx (bad dma)\n",
377 pool->
name, vaddr, (
unsigned long)dma);
380 pool->
name, vaddr, (
unsigned long)dma);
384 offset = vaddr - page->
vaddr;
386 if ((dma - page->
dma) != offset) {
387 spin_unlock_irqrestore(&pool->
lock, flags);
390 "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
391 pool->
name, vaddr, (
unsigned long long)dma);
394 "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
395 pool->
name, vaddr, (
unsigned long long)dma);
401 if (chain != offset) {
402 chain = *(
int *)(page->
vaddr + chain);
405 spin_unlock_irqrestore(&pool->
lock, flags);
407 dev_err(pool->
dev,
"dma_pool_free %s, dma %Lx "
408 "already free\n", pool->
name,
409 (
unsigned long long)dma);
412 "already free\n", pool->
name,
413 (
unsigned long long)dma);
421 *(
int *)vaddr = page->
offset;
428 spin_unlock_irqrestore(&pool->
lock, flags);
435 static void dmam_pool_release(
struct device *
dev,
void *
res)
/*
 * dmam_pool_match - devres match callback.
 * @dev:        device the resource belongs to (unused here)
 * @res:        devres data area; holds a struct dma_pool * (see the cast below)
 * @match_data: the struct dma_pool * being searched for
 *
 * Returns nonzero when the pool pointer stored in @res equals @match_data.
 * NOTE(review): reconstructed from a garbled extraction — only the brace
 * pair was missing from the visible fragment; the signature and the
 * comparison are exactly as in the source fragment.
 */
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}