#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/paravirt.h>
#ifdef CONFIG_VIRTUAL_MEM_MAP
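/*
 * With CONFIG_VIRTUAL_MEM_MAP the struct page array lives in a virtually
 * mapped region, so large holes in physical memory do not consume RAM
 * for page descriptors that would never be used.
 */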
struct page *vmem_map;
	unsigned long pg_addr, end;
	end = (unsigned long) addr + size;
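	/* Note: -16 is ~0xfUL in two's complement, so the mask below rounds
	 * the hard stack limit down to a 16-byte multiple. */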
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
	vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
			VM_DONTEXPAND | VM_DONTDUMP;
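	/* VM_DONTEXPAND keeps mremap() from growing this special mapping,
	 * and VM_DONTDUMP excludes it from core dumps. */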
	unsigned long addr, eaddr;
	while (addr < eaddr) {
	printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
	ClearPageReserved(page);
	init_page_count(page);
	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));
	pte = pte_alloc_kernel(pmd, address);
#ifdef HAVE_BUGGY_SEGREL
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
	unsigned long pta, impl_va_bits;
#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif
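	/* Bit 0 of the PTA control register is the VHPT-enable (ve) bit;
	 * CONFIG_DISABLE_VHPT simply leaves the hardware walker off. */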
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
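	/* impl_va_bits is IMPL_VA_MSB + 1, and the architecture constrains
	 * IMPL_VA_MSB to 50..60, hence the 51..61 sanity check below. */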
	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");
#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_VIRTUAL_MEM_MAP
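/*
 * Starting from entry i of this node's slice of vmem_map, skip over any
 * unmapped stretch and return the next pfn whose struct page is backed
 * by a real page-table mapping.
 */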
int vmemmap_find_next_valid_pfn(int node, int i)
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];
		if ((end_address < stop_address) &&
	} while (end_address < stop_address);
	end_address = min(end_address, stop_address);
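	/* Convert the byte offset into vmem_map back to a pfn; adding
	 * sizeof(struct page) - 1 rounds up so a partially mapped entry
	 * is not reported as valid. */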
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	end_page = PAGE_ALIGN((unsigned long) map_end);
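	/* Walk the page-aligned span of the mem_map one page at a time so
	 * each page backing this slice of the virtual mem_map gets set up. */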
	for (address = start_page; address < end_page; address += PAGE_SIZE) {
struct memmap_init_callback_data {
virtual_memmap_init(u64 start, u64 end, void *arg)
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;
	args = (struct memmap_init_callback_data *) arg;
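	/* Clamp the [map_start, map_end) range to the window the caller
	 * passed in, so only this zone's descriptors are touched. */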
	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;
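	/* Widen the range to whole pages of the mem_map itself: "out of
	 * bounds" struct page entries that share a page with in-bounds ones
	 * must be initialized too, since they may be examined later (and,
	 * for example, found to be Reserved). */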
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));
	if (map_start < map_end)
	     unsigned long start_pfn)
		struct memmap_init_callback_data args;
		args.end = start + size;
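/*
 * ia64_pfn_valid() probes the first and last byte of the struct page with
 * __get_user(), which survives a fault on an unmapped vmem_map hole
 * instead of oopsing on a direct dereference.
 */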
	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
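/*
 * Record the largest gap between consecutive memory descriptors; this
 * assumes the EFI memory map is walked in ascending address order.
 */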
int __init find_largest_hole(u64 start, u64 end, void *arg)
	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	u64 end = start + len;
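/*
 * Count the PageReserved pages in [start, end) and accumulate the total
 * into the counter passed through *arg.
 */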
count_reserved_pages(u64 start, u64 end, void *arg)
	unsigned long num_reserved = 0;
	*count += num_reserved;
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
nolwsys_setup (char *s)
__setup("nolwsys", nolwsys_setup);
	long reserved_pages, codesize, datasize, initsize;
#ifdef CONFIG_FLATMEM
		if (pgdat->bdata->node_bootmem_map)
	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
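	/* Syscalls without a light-weight handler (or all of them, when
	 * "nolwsys" was given) are pointed at the normal handler; setting
	 * bit 0 lets the fsyscall entry code tell the two kinds apart. */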
		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long start_pfn = start >> PAGE_SHIFT;
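	/* Hot-plug nr_pages frames starting at start_pfn into the target
	 * zone; a nonzero return means the add failed. */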
	ret = __add_pages(nid, zone, start_pfn, nr_pages);
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);
per_linux32_init(void)
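	/* Describe the IA-32 execution domain used for 32-bit Linux/x86
	 * binaries; a NULL handler means no special personality hook. */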
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;