Linux Kernel 3.7.1
dma-contiguous.c
1 /*
2  * Contiguous Memory Allocator for DMA mapping framework
3  * Copyright (c) 2010-2011 by Samsung Electronics.
4  * Written by:
5  * Marek Szyprowski <[email protected]>
6  * Michal Nazarewicz <[email protected]>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License as
10  * published by the Free Software Foundation; either version 2 of the
11  * License or (at your option) any later version of the license.
12  */
13 
14 #define pr_fmt(fmt) "cma: " fmt
15 
16 #ifdef CONFIG_CMA_DEBUG
17 #ifndef DEBUG
18 # define DEBUG
19 #endif
20 #endif
21 
22 #include <asm/page.h>
23 #include <asm/dma-contiguous.h>
24 
25 #include <linux/memblock.h>
26 #include <linux/err.h>
27 #include <linux/mm.h>
28 #include <linux/mutex.h>
29 #include <linux/page-isolation.h>
30 #include <linux/sizes.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/mm_types.h>
34 #include <linux/dma-contiguous.h>
35 
36 struct cma {
37  unsigned long base_pfn;
38  unsigned long count;
39  unsigned long *bitmap;
40 };
41 
42 struct cma *dma_contiguous_default_area;
43 
44 #ifdef CONFIG_CMA_SIZE_MBYTES
45 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
46 #else
47 #define CMA_SIZE_MBYTES 0
48 #endif
49 
50 /*
51  * The default global CMA area size can be defined in the kernel's .config.
52  * This is useful mainly for distro maintainers to create a kernel
53  * that works correctly for most supported systems.
54  * The size can be set in bytes or as a percentage of the total memory
55  * in the system.
56  *
57  * Users who want to set the size of the global CMA area for their
58  * system should use the cma= kernel parameter.
59  */
60 static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
61 static long size_cmdline = -1;
62 
63 static int __init early_cma(char *p)
64 {
65  pr_debug("%s(%s)\n", __func__, p);
66  size_cmdline = memparse(p, &p);
67  return 0;
68 }
69 early_param("cma", early_cma);
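To make the cma= handling above concrete, here is a hedged illustration (the boot command line is invented; the suffix handling is what memparse() provides):

/*
 * Illustration (hypothetical command line):
 *
 *     console=ttyS0 root=/dev/mmcblk0p2 cma=64M
 *
 * early_param() invokes early_cma("64M"); memparse() accepts an optional
 * K/M/G suffix, so size_cmdline becomes 64 << 20 and overrides whatever
 * CONFIG_CMA_SIZE_* policy dma_contiguous_reserve() would otherwise use.
 */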
70 
71 #ifdef CONFIG_CMA_SIZE_PERCENTAGE
72 
73 static unsigned long __init __maybe_unused cma_early_percent_memory(void)
74 {
75  struct memblock_region *reg;
76  unsigned long total_pages = 0;
77 
78  /*
79  * We cannot use memblock_phys_mem_size() here, because
80  * memblock_analyze() has not been called yet.
81  */
82  for_each_memblock(memory, reg)
83  total_pages += memblock_region_memory_end_pfn(reg) -
84  memblock_region_memory_base_pfn(reg);
85 
86  return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
87 }
88 
89 #else
90 
91 static inline __maybe_unused unsigned long cma_early_percent_memory(void)
92 {
93  return 0;
94 }
95 
96 #endif
97 
107 void __init dma_contiguous_reserve(phys_addr_t limit)
108 {
109  unsigned long selected_size = 0;
110 
111  pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
112 
113  if (size_cmdline != -1) {
114  selected_size = size_cmdline;
115  } else {
116 #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
117  selected_size = size_bytes;
118 #elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
119  selected_size = cma_early_percent_memory();
120 #elif defined(CONFIG_CMA_SIZE_SEL_MIN)
121  selected_size = min(size_bytes, cma_early_percent_memory());
122 #elif defined(CONFIG_CMA_SIZE_SEL_MAX)
123  selected_size = max(size_bytes, cma_early_percent_memory());
124 #endif
125  }
126 
127  if (selected_size) {
128  pr_debug("%s: reserving %ld MiB for global area\n", __func__,
129  selected_size / SZ_1M);
130 
131  dma_declare_contiguous(NULL, selected_size, 0, limit);
132  }
133 };
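dma_contiguous_reserve() is intended to be called once by architecture code while memblock is still live. A minimal sketch of such a hook follows; the function name and the 512 MiB limit are invented for illustration, not taken from any real architecture.

#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>

/* Hypothetical arch early-boot hook (illustrative only). */
void __init example_arch_reserve(void)
{
	/*
	 * Everything handed out from the default CMA area must be
	 * DMA-addressable, so cap it at the platform's DMA limit.
	 * Must run after memblock knows about all RAM and before the
	 * buddy allocator takes over the reserved ranges.
	 */
	dma_contiguous_reserve(SZ_512M);
}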
134 
135 static DEFINE_MUTEX(cma_mutex);
136 
137 static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
138 {
139  unsigned long pfn = base_pfn;
140  unsigned i = count >> pageblock_order;
141  struct zone *zone;
142 
143  WARN_ON_ONCE(!pfn_valid(pfn));
144  zone = page_zone(pfn_to_page(pfn));
145 
146  do {
147  unsigned j;
148  base_pfn = pfn;
149  for (j = pageblock_nr_pages; j; --j, pfn++) {
150  WARN_ON_ONCE(!pfn_valid(pfn));
151  if (page_zone(pfn_to_page(pfn)) != zone)
152  return -EINVAL;
153  }
154  init_cma_reserved_pageblock(pfn_to_page(base_pfn));
155  } while (--i);
156  return 0;
157 }
158 
159 static __init struct cma *cma_create_area(unsigned long base_pfn,
160  unsigned long count)
161 {
162  int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
163  struct cma *cma;
164  int ret = -ENOMEM;
165 
166  pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
167 
168  cma = kmalloc(sizeof *cma, GFP_KERNEL);
169  if (!cma)
170  return ERR_PTR(-ENOMEM);
171 
172  cma->base_pfn = base_pfn;
173  cma->count = count;
174  cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
175 
176  if (!cma->bitmap)
177  goto no_mem;
178 
179  ret = cma_activate_area(base_pfn, count);
180  if (ret)
181  goto error;
182 
183  pr_debug("%s: returned %p\n", __func__, (void *)cma);
184  return cma;
185 
186 error:
187  kfree(cma->bitmap);
188 no_mem:
189  kfree(cma);
190  return ERR_PTR(ret);
191 }
192 
193 static struct cma_reserved {
194  phys_addr_t start;
195  unsigned long size;
196  struct device *dev;
197 } cma_reserved[MAX_CMA_AREAS] __initdata;
198 static unsigned cma_reserved_count __initdata;
199 
200 static int __init cma_init_reserved_areas(void)
201 {
202  struct cma_reserved *r = cma_reserved;
203  unsigned i = cma_reserved_count;
204 
205  pr_debug("%s()\n", __func__);
206 
207  for (; i; --i, ++r) {
208  struct cma *cma;
209  cma = cma_create_area(PFN_DOWN(r->start),
210  r->size >> PAGE_SHIFT);
211  if (!IS_ERR(cma))
212  dev_set_cma_area(r->dev, cma);
213  }
214  return 0;
215 }
216 core_initcall(cma_init_reserved_areas);
217 
230 int __init dma_declare_contiguous(struct device *dev, unsigned long size,
231  phys_addr_t base, phys_addr_t limit)
232 {
233  struct cma_reserved *r = &cma_reserved[cma_reserved_count];
234  unsigned long alignment;
235 
236  pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
237  (unsigned long)size, (unsigned long)base,
238  (unsigned long)limit);
239 
240  /* Sanity checks */
241  if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
242  pr_err("Not enough slots for CMA reserved regions!\n");
243  return -ENOSPC;
244  }
245 
246  if (!size)
247  return -EINVAL;
248 
249  /* Sanitise input arguments */
250  alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
251  base = ALIGN(base, alignment);
252  size = ALIGN(size, alignment);
253  limit &= ~(alignment - 1);
254 
255  /* Reserve memory */
256  if (base) {
257  if (memblock_is_region_reserved(base, size) ||
258  memblock_reserve(base, size) < 0) {
259  base = -EBUSY;
260  goto err;
261  }
262  } else {
263  /*
264  * Use __memblock_alloc_base() since
265  * memblock_alloc_base() panic()s.
266  */
267  phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
268  if (!addr) {
269  base = -ENOMEM;
270  goto err;
271  } else if (addr + size > ~(unsigned long)0) {
272  memblock_free(addr, size);
273  base = -EINVAL;
274  goto err;
275  } else {
276  base = addr;
277  }
278  }
279 
280  /*
281  * Each reserved area must be initialised later, when more kernel
282  * subsystems (like slab allocator) are available.
283  */
284  r->start = base;
285  r->size = size;
286  r->dev = dev;
287  cma_reserved_count++;
288  pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
289  (unsigned long)base);
290 
291  /* Architecture specific contiguous memory fixup. */
292  dma_contiguous_early_fixup(base, size);
293  return 0;
294 err:
295  pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
296  return base;
297 }
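A device-private area is declared the same way from board code during the machine's early reservation phase, i.e. before cma_init_reserved_areas() runs as a core_initcall. The sketch below uses a hypothetical device and sizes:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/dma-contiguous.h>

/* Hypothetical device that needs large physically contiguous buffers. */
static struct platform_device example_codec_dev = {
	.name = "example-codec",
	.id   = -1,
};

/* Called from the machine's reserve hook, while memblock is still live. */
static void __init example_board_reserve(void)
{
	/* 16 MiB, any base address, kept below the 256 MiB boundary. */
	if (dma_declare_contiguous(&example_codec_dev.dev,
				   16 * SZ_1M, 0, SZ_256M))
		pr_warn("example-codec: CMA reservation failed\n");
}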
298 
310 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
311  unsigned int align)
312 {
313  unsigned long mask, pfn, pageno, start = 0;
314  struct cma *cma = dev_get_cma_area(dev);
315  struct page *page = NULL;
316  int ret;
317 
318  if (!cma || !cma->count)
319  return NULL;
320 
321  if (align > CONFIG_CMA_ALIGNMENT)
322  align = CONFIG_CMA_ALIGNMENT;
323 
324  pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
325  count, align);
326 
327  if (!count)
328  return NULL;
329 
330  mask = (1 << align) - 1;
331 
332  mutex_lock(&cma_mutex);
333 
334  for (;;) {
335  pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
336  start, count, mask);
337  if (pageno >= cma->count)
338  break;
339 
340  pfn = cma->base_pfn + pageno;
341  ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
342  if (ret == 0) {
343  bitmap_set(cma->bitmap, pageno, count);
344  page = pfn_to_page(pfn);
345  break;
346  } else if (ret != -EBUSY) {
347  break;
348  }
349  pr_debug("%s(): memory range at %p is busy, retrying\n",
350  __func__, pfn_to_page(pfn));
351  /* try again with a bit different memory target */
352  start = pageno + mask + 1;
353  }
354 
355  mutex_unlock(&cma_mutex);
356  pr_debug("%s(): returned %p\n", __func__, page);
357  return page;
358 }
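The typical caller of dma_alloc_from_contiguous() is an architecture's dma_map_ops backend. The wrapper below is only a sketch of that pattern (example_cma_alloc() is invented), showing how the page count and alignment order are derived from a byte size:

#include <linux/mm.h>
#include <linux/dma-contiguous.h>

/* Hypothetical helper: allocate 'size' bytes of contiguous memory for
 * 'dev' from its CMA area (its private one, or the global default). */
static struct page *example_cma_alloc(struct device *dev, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int order = get_order(size);

	/* 'align' is an order; it is capped at CONFIG_CMA_ALIGNMENT above. */
	return dma_alloc_from_contiguous(dev, count, order);
}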
359 
370 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
371  int count)
372 {
373  struct cma *cma = dev_get_cma_area(dev);
374  unsigned long pfn;
375 
376  if (!cma || !pages)
377  return false;
378 
379  pr_debug("%s(page %p)\n", __func__, (void *)pages);
380 
381  pfn = page_to_pfn(pages);
382 
383  if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
384  return false;
385 
386  VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
387 
388  mutex_lock(&cma_mutex);
389  bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
390  free_contig_range(pfn, count);
391  mutex_unlock(&cma_mutex);
392 
393  return true;
394 }
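The matching free path, continuing the invented example_cma_alloc() sketch above: the count must mirror the original request so the right bits are cleared in cma->bitmap, and the __free_pages() fallback for non-CMA pages is one common pattern, not something this file mandates.

/* Hypothetical helper paired with example_cma_alloc(). */
static void example_cma_release(struct device *dev, struct page *page,
				size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Returns false when 'page' did not come from dev's CMA area. */
	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}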