12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/errno.h>
16 #include <linux/mman.h>
23 #include <asm/cputype.h>
24 #include <asm/sections.h>
25 #include <asm/setup.h>
26 #include <asm/sizes.h>
28 #include <asm/memblock.h>
59 __setup(
"noalign", noalign_setup);
74 set_cr((get_cr() & ~mask) |
set);
80 unsigned long virtual;
/*
 * Common L2 (PTE) protection bits for device mappings: present, young,
 * dirty, readable and writable.  Memory-type attribute bits are OR'd in
 * by the mem_types[] table entries that use this.
 * Fix: removed stray line-number residue ("86"/"87") fused into the macro.
 */
#define PROT_PTE_DEVICE		(PTE_PRESENT | PTE_YOUNG | \
				 PTE_DIRTY | PTE_READ | PTE_WRITE)
/*
 * Common L1 section (PMD) protection bits for device mappings: a present
 * section descriptor with read and write permission.
 * Fix: removed stray line-number residue ("88"/"89") fused into the macro.
 */
#define PROT_SECT_DEVICE	(PMD_TYPE_SECT | PMD_PRESENT | \
				 PMD_SECT_READ | PMD_SECT_WRITE)
91 static struct mem_type mem_types[] = {
135 static void __init build_mem_type_table(
void)
143 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
145 static void __init *early_alloc(
unsigned long sz)
157 __pmd_populate(pmd,
__pa(pte) | prot);
163 static void __init alloc_init_pte(
pmd_t *pmd,
unsigned long addr,
164 unsigned long end,
unsigned long pfn,
171 }
while (pte++, addr +=
PAGE_SIZE, addr != end);
174 static void __init alloc_init_section(
pgd_t *pgd,
unsigned long addr,
175 unsigned long end,
unsigned long phys,
216 "0x%08llx at 0x%08lx in user region\n",
224 "overlaps vmalloc space\n",
228 type = &mem_types[md->
type];
236 "be mapped using pages, ignoring.\n",
244 unsigned long next = pgd_addr_end(addr, end);
246 alloc_init_section(pgd, addr, next, phys, type);
250 }
while (pgd++, addr != end);
260 static int __init early_vmalloc(
char *
arg)
264 if (vmalloc_reserve <
SZ_16M) {
267 "vmalloc area too small, limiting to %luMB\n",
268 vmalloc_reserve >> 20);
274 "vmalloc area is too big, limiting to %luMB\n",
275 vmalloc_reserve >> 20);
278 vmalloc_min = (
void *)(
VMALLOC_END - vmalloc_reserve);
289 lowmem_limit =
__pa(vmalloc_min - 1) + 1;
300 static inline void prepare_page_table(
void)
317 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
318 if (end >= lowmem_limit)
349 static void __init devicemaps_init(
void)
372 create_mapping(&
map);
382 create_mapping(&
map);
394 static void __init map_lowmem(
void)
396 struct memblock_region *
reg;
399 for_each_memblock(
memory, reg) {
404 if (end > lowmem_limit)
414 create_mapping(&
map);
426 build_mem_type_table();
428 prepare_page_table();
432 top_pmd = pmd_off_k(0xffff0000);
450 unsigned long base_pmdval;
464 unsigned long pmdval = (i <<
PGDIR_SHIFT) | base_pmdval;
469 flush_pmd_entry(pmd);
491 unsigned long pfn =
pte_pfn(*ptep);
506 mapping = page_mapping(page);
511 __flush_icache_all();