#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/dma-contiguous.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

/* One contiguous memory area: base page frame, size in pages, free-page bitmap. */
struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
        unsigned long   *bitmap;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/* Default size comes from Kconfig; the "cma=" parameter overrides it. */
static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static long size_cmdline = -1;
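
/* Parse the "cma=<size>" early boot parameter into size_cmdline. */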
static int __init early_cma(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        size_cmdline = memparse(p, &p);
        return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static unsigned long __init __maybe_unused cma_early_percent_memory(void)
{
        struct memblock_region *reg;
        unsigned long total_pages = 0;

        /*
         * Walk memblock directly; memblock_phys_mem_size() is not usable
         * this early in boot.
         */
        for_each_memblock(memory, reg)
                total_pages += memblock_region_memory_end_pfn(reg) -
                               memblock_region_memory_base_pfn(reg);

        return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused unsigned long cma_early_percent_memory(void)
{
        return 0;
}

#endif
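
/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * Called by arch-specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */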
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        unsigned long selected_size = 0;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
                selected_size = size_cmdline;
        } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
                selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
                selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
                selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
                selected_size = max(size_bytes, cma_early_percent_memory());
#endif
        }

        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         selected_size / SZ_1M);

                dma_declare_contiguous(NULL, selected_size, 0, limit);
        }
}

static DEFINE_MUTEX(cma_mutex);
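
/*
 * Check that the reserved range lies in a single zone and hand its
 * pageblocks over to the buddy allocator as MIGRATE_CMA.
 */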
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
        unsigned long pfn = base_pfn;
        unsigned i = count >> pageblock_order;
        struct zone *zone;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                return -EINVAL;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        return 0;
}
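
/*
 * Allocate the bookkeeping for an already reserved range and activate it.
 */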
static __init struct cma *cma_create_area(unsigned long base_pfn,
                                          unsigned long count)
{
        int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
        struct cma *cma;
        int ret = -ENOMEM;

        pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

        cma = kmalloc(sizeof *cma, GFP_KERNEL);
        if (!cma)
                return ERR_PTR(-ENOMEM);

        cma->base_pfn = base_pfn;
        cma->count = count;
        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!cma->bitmap)
                goto no_mem;

        ret = cma_activate_area(base_pfn, count);
        if (ret)
                goto error;

        pr_debug("%s: returned %p\n", __func__, (void *)cma);
        return cma;

error:
        kfree(cma->bitmap);
no_mem:
        kfree(cma);
        return ERR_PTR(ret);
}
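
/*
 * Areas reserved from the early allocator; they are turned into proper CMA
 * areas by cma_init_reserved_areas() once the slab allocator is available.
 */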
static struct cma_reserved {
        phys_addr_t start;
        unsigned long size;
        struct device *dev;
} cma_reserved[MAX_CMA_AREAS] __initdata;
static unsigned cma_reserved_count __initdata;

static int __init cma_init_reserved_areas(void)
{
        struct cma_reserved *r = cma_reserved;
        unsigned i = cma_reserved_count;

        pr_debug("%s()\n", __func__);

        for (; i; --i, ++r) {
                struct cma *cma;

                cma = cma_create_area(PFN_DOWN(r->start),
                                      r->size >> PAGE_SHIFT);
                if (!IS_ERR(cma))
                        dev_set_cma_area(r->dev, cma);
        }
        return 0;
}
core_initcall(cma_init_reserved_areas);
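
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *                            for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * Called by board-specific code while the early allocator (memblock or
 * bootmem) is still active.
 */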
int __init dma_declare_contiguous(struct device *dev, unsigned long size,
                                  phys_addr_t base, phys_addr_t limit)
{
        struct cma_reserved *r = &cma_reserved[cma_reserved_count];
        unsigned long alignment;

        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                 (unsigned long)size, (unsigned long)base,
                 (unsigned long)limit);

        /* Sanity checks */
        if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        /* Sanitise input arguments */
        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        /* Reserve memory */
        if (base) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        base = -EBUSY;
                        goto err;
                }
        } else {
                /*
                 * Use __memblock_alloc_base() since
                 * memblock_alloc_base() panic()s on failure.
                 */
                phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                if (!addr) {
                        base = -ENOMEM;
                        goto err;
                } else if (addr + size > ~(unsigned long)0) {
                        memblock_free(addr, size);
                        base = -EINVAL;
                        goto err;
                } else {
                        base = addr;
                }
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like the slab allocator) are available.
         */
        r->start = base;
        r->size = size;
        r->dev = dev;
        cma_reserved_count++;
        pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
                (unsigned long)base);

        /* Architecture-specific contiguous memory fixup. */
        dma_contiguous_early_fixup(base, size);
        return 0;
err:
        pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
        return base;
}
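
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * Allocates from the device-specific contiguous memory area if one is
 * assigned, otherwise from the default global area.
 */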
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int align)
{
        unsigned long mask, pfn, pageno, start = 0;
        struct cma *cma = dev_get_cma_area(dev);
        struct page *page = NULL;
        int ret;

        if (!cma || !cma->count)
                return NULL;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = (1 << align) - 1;

        mutex_lock(&cma_mutex);

        for (;;) {
                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                    start, count, mask);
                if (pageno >= cma->count)
                        break;

                pfn = cma->base_pfn + pageno;
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                if (ret == 0) {
                        bitmap_set(cma->bitmap, pageno, count);
                        page = pfn_to_page(pfn);
                        break;
                } else if (ret != -EBUSY) {
                        break;
                }
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = pageno + mask + 1;
        }

        mutex_unlock(&cma_mutex);
        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count)
{
        struct cma *cma = dev_get_cma_area(dev);
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        mutex_lock(&cma_mutex);
        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
        free_contig_range(pfn, count);
        mutex_unlock(&cma_mutex);

        return true;
}