#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mman.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/mmu_context.h>
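/*
 * Fragments of the cache_policies[] table follow; each entry pairs a
 * "cachepolicy=" boot-parameter name with the memory attribute encodings
 * it selects (the remaining entries and fields are elided here).
 */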
	{
		.policy		= "writethrough",
		/* ... */
	}, {
		.policy		= "writeback",
		/* ... */
	},
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		/* ... match p against cache_policies[i].policy ... */
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}
100 " bfi %0, %1, #%2, #8\n"
101 " msr mair_el1, %0\n"
static void __init init_mem_pgprot(void)
{
	/* ... */
	for (i = 0; i < 16; i++) {
		/* ... fold the default attributes into protection_map[i] ... */
	}
}
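/*
 * early_alloc() is the boot-time allocator used by the page-table setup
 * below; in this file it returns zeroed memory obtained from memblock.
 */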
static void __init *early_alloc(unsigned long sz)
{
	/* ... */
}
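/*
 * alloc_init_pte(): fill in the PTE level for [addr, end), advancing one
 * page and one pfn per loop iteration.
 */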
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
{
	/* ... */
	do {
		/* ... set one PTE, then advance pfn ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
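/*
 * alloc_init_pmd(): fill in the PMD level for [addr, end); the elided
 * body uses section mappings where alignment allows and falls back to
 * alloc_init_pte() otherwise.
 */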
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys)
{
	/* ... */
	do {
		/* ... */
	} while (pmd++, addr = next, addr != end);
}
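/*
 * alloc_init_pud(): walk the PUD entries covering [addr, end) and hand
 * each sub-range down to alloc_init_pmd().
 */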
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
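/*
 * create_mapping(): build kernel page tables for a physical range at the
 * given virtual address, walking the levels via alloc_init_pud() and
 * refusing virtual addresses outside the kernel range.
 */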
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
	/* ... */
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
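/*
 * map_mem(): create the linear kernel mapping for every memblock memory
 * region.
 */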
static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		/* ... create_mapping() for each region ... */
	}
}
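/*
 * Sparse vmemmap support: with 64K pages the vmemmap is populated from
 * base pages; with 4K pages it is populated with PMD-sized section
 * mappings, as in the loop below.
 */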
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)
{
	return vmemmap_populate_basepages(start_page, size, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	} while (addr = next, addr != end);
	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */