/* Source: Linux kernel 3.7.1 -- dma-mapping.c (Blackfin architecture DMA
 * mapping support; recovered from a Doxygen-generated documentation extract).
 */
/*
 * Dynamic DMA mapping support
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */
8 
9 #include <linux/types.h>
10 #include <linux/gfp.h>
11 #include <linux/string.h>
12 #include <linux/spinlock.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/scatterlist.h>
15 #include <linux/export.h>
16 
/* Protects the dma_page bitmap and its bookkeeping below. */
static spinlock_t dma_page_lock;
/* One page worth of bits; bit i set => pool page i is allocated. */
static unsigned long *dma_page;
/* Number of PAGE_SIZE pages in the uncached pool. */
static unsigned int dma_pages;
/* Start address of the uncached pool (page aligned). */
static unsigned long dma_base;
/* Pool size in bytes (page aligned). */
static unsigned long dma_size;
/* Non-zero once dma_alloc_init() has run; see __alloc_dma_pages(). */
static unsigned int dma_initialized;
23 
24 static void dma_alloc_init(unsigned long start, unsigned long end)
25 {
26  spin_lock_init(&dma_page_lock);
27  dma_initialized = 0;
28 
29  dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
31  dma_base = PAGE_ALIGN(start);
32  dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
33  dma_pages = dma_size >> PAGE_SHIFT;
34  memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
35  dma_initialized = 1;
36 
37  printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__,
38  dma_page, dma_pages, dma_base);
39 }
40 
41 static inline unsigned int get_pages(size_t size)
42 {
43  return ((size - 1) >> PAGE_SHIFT) + 1;
44 }
45 
46 static unsigned long __alloc_dma_pages(unsigned int pages)
47 {
48  unsigned long ret = 0, flags;
49  int i, count = 0;
50 
51  if (dma_initialized == 0)
52  dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);
53 
54  spin_lock_irqsave(&dma_page_lock, flags);
55 
56  for (i = 0; i < dma_pages;) {
57  if (test_bit(i++, dma_page) == 0) {
58  if (++count == pages) {
59  while (count--)
60  __set_bit(--i, dma_page);
61 
62  ret = dma_base + (i << PAGE_SHIFT);
63  break;
64  }
65  } else
66  count = 0;
67  }
68  spin_unlock_irqrestore(&dma_page_lock, flags);
69  return ret;
70 }
71 
72 static void __free_dma_pages(unsigned long addr, unsigned int pages)
73 {
74  unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
75  unsigned long flags;
76  int i;
77 
78  if ((page + pages) > dma_pages) {
79  printk(KERN_ERR "%s: freeing outside range.\n", __func__);
80  BUG();
81  }
82 
83  spin_lock_irqsave(&dma_page_lock, flags);
84  for (i = page; i < page + pages; i++)
86 
87  spin_unlock_irqrestore(&dma_page_lock, flags);
88 }
89 
90 void *dma_alloc_coherent(struct device *dev, size_t size,
92 {
93  void *ret;
94 
95  ret = (void *)__alloc_dma_pages(get_pages(size));
96 
97  if (ret) {
98  memset(ret, 0, size);
99  *dma_handle = virt_to_phys(ret);
100  }
101 
102  return ret;
103 }
105 
106 void
107 dma_free_coherent(struct device *dev, size_t size, void *vaddr,
109 {
110  __free_dma_pages((unsigned long)vaddr, get_pages(size));
111 }
113 
/*
 * Streaming DMA mappings
 */
117 void __dma_sync(dma_addr_t addr, size_t size,
118  enum dma_data_direction dir)
119 {
120  __dma_sync_inline(addr, size, dir);
121 }
123 
124 int
125 dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
127 {
128  struct scatterlist *sg;
129  int i;
130 
131  for_each_sg(sg_list, sg, nents, i) {
132  sg->dma_address = (dma_addr_t) sg_virt(sg);
133  __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
134  }
135 
136  return nents;
137 }
139 
141  int nelems, enum dma_data_direction direction)
142 {
143  struct scatterlist *sg;
144  int i;
145 
146  for_each_sg(sg_list, sg, nelems, i) {
147  sg->dma_address = (dma_addr_t) sg_virt(sg);
148  __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
149  }
150 }