#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
static int __init parse_direct_gbpages_off(char *arg)

static int __init parse_direct_gbpages_on(char *arg)
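
/*
 * The two early_param handlers above ("gbpages"/"nogbpages") force the
 * use of 1GB pages for the direct mapping on or off, overriding the
 * CPUID-based default kept in direct_gbpages.
 */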
	else if (!strcmp(str, "off"))

__setup("noexec32=", nonx32_setup);
	for (address = start; address <= end; address += PGDIR_SIZE) {

			spin_unlock(pgt_lock);
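
/*
 * sync_global_pgds(): for each PGDIR_SIZE chunk of [start, end], copy
 * the kernel's reference pgd entry into every pgd on pgd_list, taking
 * each mm's page_table_lock (the pgt_lock released above) so paravirt
 * guests see a consistent update.
 */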
static __ref void *spp_getpage(void)

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)

	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);
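
/*
 * The fill_*() helpers each walk one level of the page-table hierarchy
 * for vaddr, allocating a fresh table via spp_getpage() whenever the
 * entry is empty, then return the next-level entry for vaddr.
 */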
	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

			"PGD FIXMAP MISSING, it should be setup in head.S!\n");

	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);

	return fill_pte(pmd, vaddr);
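
/*
 * set_pte_vaddr() installs a single kernel PTE; it relies on the fixmap
 * PGD having been set up in head_64.S, hence the error message above.
 * populate_extra_pmd()/populate_extra_pte() reuse the fill_*() helpers
 * for early callers.
 */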
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					pgprot_t prot)

			pud = (pud_t *) spp_getpage();

			pmd = (pmd_t *) spp_getpage();
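
/*
 * __init_extra_mapping() maps [phys, phys + size) with 2MB PMD entries
 * of the given protection, allocating intermediate tables with
 * spp_getpage(); it backs init_extra_mapping_wb()/_uc(), used by the
 * SGI UV platform code.
 */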
		if (vaddr < (unsigned long) _text || vaddr > end)
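
/*
 * Part of cleanup_highmap(): PMDs in the high kernel mapping that fall
 * outside [_text, end] are cleared, so only the kernel image itself
 * stays mapped there.
 */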
static __ref void *alloc_low_page(unsigned long *phys)

		panic("alloc_low_page: ran out of memory");
static __ref void *map_low_page(void *virt)

	adr = (void *)(((unsigned long)adr) | left);

static __ref void unmap_low_page(void *adr)
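
/*
 * Early page-table allocation: alloc_low_page() hands out zeroed pages
 * from a buffer reserved before bootmem is up (panicking when it runs
 * dry), while map_low_page()/unmap_low_page() temporarily map an
 * existing table through early_memremap() so it can be modified; both
 * fall back to the normal allocator once after_bootmem is set.
 */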
	unsigned long last_map_addr = end;

			printk(" pte=%p addr=%lx pte=%016lx\n",

	return last_map_addr;
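
/*
 * phys_pte_init() fills a last-level page table with 4KB mappings for
 * [addr, end) and returns the highest address mapped (last_map_addr).
 */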
	      unsigned long page_size_mask, pgprot_t prot)

	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

		unsigned long pte_phys;
		if (address >= end) {

				spin_lock(&init_mm.page_table_lock);
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				spin_unlock(&init_mm.page_table_lock);

				last_map_addr = next;

			spin_lock(&init_mm.page_table_lock);
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		spin_unlock(&init_mm.page_table_lock);

	return last_map_addr;
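
/*
 * phys_pmd_init() maps at 2MB granularity when page_size_mask permits
 * it (PG_LEVEL_2M), otherwise allocates a pte page and recurses into
 * phys_pte_init(); init_mm.page_table_lock guards each entry it
 * publishes into the live page tables.
 */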
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)

	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
			last_map_addr = phys_pmd_init(pmd, addr, end,
						 page_size_mask, prot);

			last_map_addr = next;

			spin_lock(&init_mm.page_table_lock);
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;

		pmd = alloc_low_page(&pmd_phys);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);

		spin_lock(&init_mm.page_table_lock);
		spin_unlock(&init_mm.page_table_lock);

	return last_map_addr;
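
/*
 * phys_pud_init() is the same pattern one level up: 1GB pages when
 * PG_LEVEL_1G is permitted in page_size_mask, otherwise a pmd page
 * filled by phys_pmd_init().
 */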
			     unsigned long page_size_mask)

	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;

	for (; start < end; start = next) {

			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);

		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		spin_unlock(&init_mm.page_table_lock);

	return last_map_addr;
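
/*
 * kernel_physical_mapping_init() drives the whole direct mapping: one
 * pud page per PGD slot, and if any pgd entry was newly populated
 * (pgd_changed), sync_global_pgds() propagates it to all page tables.
 */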
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
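
/*
 * Called from paging_init(): registers the active memory ranges of
 * every node with sparsemem before sparse_init() builds the memmap.
 */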
#ifdef CONFIG_MEMORY_HOTPLUG

static void update_end_of_memory_vars(u64 start, u64 size)

	unsigned long end_pfn = PFN_UP(start + size);
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;

	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	update_end_of_memory_vars(start, size);
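
/*
 * Memory hotplug: arch_add_memory() extends the direct mapping via
 * init_memory_mapping(), hands the new pfn range to the core with
 * __add_pages(), then update_end_of_memory_vars() pushes max_pfn,
 * max_low_pfn and high_memory out if the range ends above them.
 */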
	long codesize, reservedpages, datasize, initsize;
	unsigned long absent_pages;

	absent_pages = absent_pages_in_range(0, max_pfn);
	reservedpages = max_pfn - totalram_pages - absent_pages;

			 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		absent_pages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
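
/*
 * mem_init() accounting: pfns absent from every zone are counted by
 * absent_pages_in_range(), so reserved pages are what remains of
 * max_pfn after subtracting the buddy-managed pages (totalram_pages)
 * and the absent ones; the printk reports each figure in KB.
 */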
#ifdef CONFIG_DEBUG_RODATA

int kernel_set_to_readonly;

void set_kernel_text_rw(void)

	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

void set_kernel_text_ro(void)

	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);
void mark_rodata_ro(void)

	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
	unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);

		(end - start) >> 10);

	kernel_set_to_readonly = 1;

	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
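
/*
 * CONFIG_DEBUG_RODATA: mark_rodata_ro() write-protects everything from
 * _text to __end_rodata_hpage_align and additionally marks only the
 * rodata portion non-executable (kernel text must stay executable);
 * kernel_set_to_readonly then lets set_kernel_text_rw()/_ro() above
 * temporarily flip text protection, e.g. for runtime code patching.
 */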
#ifdef CONFIG_CPA_DEBUG

	if (above != 0 && above != -1UL)

#ifdef CONFIG_IA32_EMULATION
	if (!mm || mm->context.ia32_compat)

	if (vma == &gate_vma)

	return 2UL * 1024 * 1024 * 1024;
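
/*
 * Assorted helpers: kern_addr_valid() rejects non-canonical addresses
 * (the sign-extension bits must be all zeroes or all ones, hence the
 * 0 / -1UL test above); the gate (vsyscall) VMA is hidden from 32-bit
 * compat tasks; and SGI UV systems report a 2GB memory block size.
 */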
#ifdef CONFIG_SPARSEMEM_VMEMMAP

	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);

	for (; addr < end; addr = next) {

			if (p_end != p || node_start != node) {
					addr_start, addr_end-1, p_start, p_end-1, node_start);

void __meminit vmemmap_populate_print_last(void)

		addr_start, addr_end-1, p_start, p_end-1, node_start);
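
/*
 * CONFIG_SPARSEMEM_VMEMMAP: vmemmap_populate() backs the virtual memmap
 * for a section with node-local memory, preferring 2MB PMD mappings;
 * the printk lines batch-report each contiguous virtual-to-physical
 * PMD mapping per node, with vmemmap_populate_print_last() flushing
 * the final pending range.
 */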