#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mman.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/highmem.h>
#include <asm/traps.h>
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4
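/*
 * Cache policies, in increasing order of capability.  They are mainly
 * useful for tracking down cache coherency problems by forcing the
 * cache or the write buffer off from the kernel command line.
 */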
		.policy		= "writethrough",
		.policy		= "writeback",
		.policy		= "writealloc",
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
	set_cr(cr_alignment);
	char *p = "buffered";
	early_cachepolicy(p);

	char *p = "uncached";
	early_cachepolicy(p);
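/*
 * These fragments are from the handlers for the deprecated "nocache"
 * and "nowb" early parameters (bodies elided), which simply forward
 * the equivalent modern policy name to early_cachepolicy() -- the
 * same effect as booting with "cachepolicy=buffered" or
 * "cachepolicy=uncached".
 */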
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
	set_cr(cr_alignment);
__setup("noalign", noalign_setup);
	set_cr((get_cr() & ~mask) | set);
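/*
 * This store is the tail of adjust_cr() (rest elided), which updates
 * the requested bits of the CP15 control register and mirrors the
 * change into cr_alignment/cr_no_alignment so later writes keep it.
 */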
#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
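/*
 * Baseline protection bits shared by all device mappings: device PTEs
 * are present, young, dirty and execute-never; device sections are
 * writable section descriptors.  The mem_types[] entries below OR in
 * the memory-type and shareability bits per device class.
 */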
static struct mem_type mem_types[] = {
	[MT_DEVICE_NONSHARED] = {
#ifndef CONFIG_ARM_LPAE
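/*
 * build_mem_type_table() patches the static mem_types[] table at boot
 * for the CPU architecture level, the chosen cache policy and the
 * current control-register settings; most of its body is elided in
 * this excerpt.
 */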
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
#if defined(CONFIG_CPU_DCACHE_DISABLE)
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (mem_types[i].prot_l1)
			mem_types[i].prot_l1 |= PMD_BIT4;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_BIT4;
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
#ifndef CONFIG_ARM_LPAE
#ifdef CONFIG_ARM_LPAE
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	for (i = 0; i < 16; i++) {
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
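/*
 * The CPU vector page sits at 0xffff0000 when high vectors are
 * enabled in the control register, and at address 0 otherwise;
 * vectors_base() hides that distinction for the mapping code below.
 */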
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}
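/*
 * Boot-time allocators used before the page allocator is up:
 * early_alloc_aligned() (body elided) returns zeroed memory from
 * memblock, and early_alloc() just asks for natural alignment.
 */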
		__pmd_populate(pmd, __pa(pte), prot);
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
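/*
 * alloc_init_pte() maps [addr, end) one page at a time: it ensures a
 * PTE table backs this PMD entry, then writes one PTE per page using
 * the protection bits of the requested memory type.
 */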
static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		} while (pmd++, addr += SECTION_SIZE, addr != end);
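/*
 * alloc_init_section() uses 1MB section entries when addr, end and
 * phys are all section-aligned and the type defines prot_sect,
 * falling back to alloc_init_pte() otherwise.  On classic (non-LPAE)
 * page tables a Linux PMD covers two sections, hence the pmd++
 * adjustment when starting on the odd section.
 */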
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys,
				  const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
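/*
 * With 2-level ARM page tables the PUD is folded into the PGD, so
 * this loop effectively runs once per PGD entry; it exists to keep
 * the generic pgd/pud/pmd/pte walk structure intact.
 */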
#ifndef CONFIG_ARM_LPAE
641 "mapping for 0x%08llx at 0x%08lx\n",
654 "mapping for 0x%08llx at 0x%08lx\n",
661 " at 0x%08lx invalid alignment\n",
		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
	} while (addr != end);
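/*
 * A supersection is encoded by repeating the same descriptor in 16
 * consecutive first-level slots, which is why the loop above writes
 * the entry 16 times per 16MB step.
 */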
705 " at 0x%08lx in user region\n",
714 " at 0x%08lx out of vmalloc space\n",
	type = &mem_types[md->type];
#ifndef CONFIG_ARM_LPAE
	/* Catch 36-bit physical addresses */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif
736 "be mapped using pages, ignoring.\n",
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
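/*
 * create_mapping() (its sanity-check messages appear in fragments
 * above) walks the kernel page tables one PGD entry at a time and
 * delegates to alloc_init_pud()/alloc_init_section()/alloc_init_pte()
 * to realize the mapping described by a struct map_desc.  A sketch of
 * a typical caller, with made-up addresses:
 *
 *	struct map_desc map;
 *
 *	map.pfn = __phys_to_pfn(0x10000000);	// hypothetical phys base
 *	map.virtual = 0xf8000000;		// hypothetical virt base
 *	map.length = SZ_1M;
 *	map.type = MT_DEVICE;
 *	create_mapping(&map);
 */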
	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
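/*
 * Both allocations register static ranges in the early vmalloc list:
 * iotable_init() creates and records the machine's fixed I/O
 * mappings, while the second fragment (from vm_reserve_area_early())
 * only reserves an address range, e.g. for the PMD gaps and PCI I/O
 * window handled below, without mapping anything.
 */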
#ifndef CONFIG_ARM_LPAE
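/*
 * On non-LPAE ARM a Linux PMD consists of two consecutive 1MB section
 * entries.  create_mapping() may leave one half of such a PMD unused,
 * and a later ioremap()/vmalloc() claiming the free half would need a
 * page table where a section entry already exists.  The helpers below
 * reserve those unused halves so the conflict cannot arise.
 */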
static void __init pmd_empty_section_gap(unsigned long addr)
static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
#define fill_pmd_gaps() do { } while (0)
#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
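/*
 * pci_reserve_io() keeps the fixed 2MB PCI I/O window at
 * PCI_IO_VIRT_BASE out of the vmalloc allocator, unless one of the
 * static mappings checked below already covers it.
 */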
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		addr &= ~(SZ_2M - 1);
		if (addr == PCI_IO_VIRT_BASE)
			return;
#define pci_reserve_io() do { } while (0)
static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);
	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}
901 "vmalloc area is too big, limiting to %luMB\n",
902 vmalloc_reserve >> 20);
	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
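/*
 * "vmalloc=<size>" resizes the vmalloc area by moving vmalloc_min,
 * clamped to at least 16MB of vmalloc space while leaving at least
 * 32MB of lowmem; e.g. booting with "vmalloc=512M".
 */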
#ifdef CONFIG_HIGHMEM
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
				bank[1].start = __pa(vmalloc_min - 1) + 1;
959 "(!CONFIG_HIGHMEM).\n",
960 (
unsigned long long)bank->
start,
961 (
unsigned long long)bank->
start + bank->
size - 1);
972 "(vmalloc region overlap).\n",
973 (
unsigned long long)bank->
start,
974 (
unsigned long long)bank->
start + bank->
size - 1);
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
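/*
 * The cases above are from sanity_check_meminfo(): with
 * CONFIG_HIGHMEM, a bank straddling vmalloc_min is split into lowmem
 * and highmem parts; without it, banks entirely above the limit are
 * ignored and straddling banks are truncated at vmalloc_min.
 */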
#ifdef CONFIG_HIGHMEM
			reason = "with VIPT aliasing cache";
		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
			reason);
static inline void prepare_page_table(void)
#ifdef CONFIG_XIP_KERNEL
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;
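/*
 * prepare_page_table() (largely elided) clears the boot page-table
 * entries outside the kernel image: everything below it, the vmalloc
 * area, and anything past the lowmem end of the first memblock
 * region as bounded by arm_lowmem_limit above.
 */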
#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
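/*
 * Under LPAE the swapper page directory is one PGD page plus the PMD
 * tables it points at, hence the extra PAGE_SIZE; classic 2-level
 * tables need only the 16KB of first-level entries.
 */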
#ifdef CONFIG_SA1111
#ifdef CONFIG_XIP_KERNEL
	create_mapping(&map);
	create_mapping(&map);
#ifdef FLUSH_BASE_MINICACHE
	create_mapping(&map);
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}
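/*
 * devicemaps_init() finishes by mapping the vector page: always at
 * 0xffff0000 as MT_HIGH_VECTORS and, if the CPU is not using high
 * vectors, also at virtual address 0 as MT_LOW_VECTORS.
 */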
static void __init kmap_init(void)
#ifdef CONFIG_HIGHMEM
static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		create_mapping(&map);
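/*
 * map_lowmem() builds an MT_MEMORY map_desc for each memblock region,
 * clipped to arm_lowmem_limit, and passes it to create_mapping(); the
 * map_desc field assignments are elided from this excerpt.
 */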
	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);
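/*
 * These calls form the core of paging_init(): fix up the memory-type
 * table, reset the boot page tables, map lowmem and the device/vector
 * mappings, then cache the PMD covering the vector page in top_pmd
 * for later vector-page updates.
 */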