#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>
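
/*
 * Fixmap helpers: __get_pte_phys() returns the kernel PTE entry that
 * covers the given virtual address.
 */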
static pte_t *__get_pte_phys(unsigned long addr)
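
/* Point the PTE for 'addr' at physical address 'phys' with protection 'prot'. */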
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)

	pte = __get_pte_phys(addr);
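
/* Remove the PTE mapping previously installed for 'addr'. */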
static void clear_pte_phys(unsigned long addr, pgprot_t prot)

	pte = __get_pte_phys(addr);
	set_pte_phys(address, phys, prot);

	clear_pte_phys(address, prot);
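
/*
 * Pre-populate page tables for a virtual address range: walk the
 * PGD/PUD/PMD levels from 'vaddr' up to 'end', creating any missing
 * intermediate tables along the way.
 */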
	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
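
/*
 * Per-node setup: look up the node's PFN range and record it in
 * NODE_DATA(nid).  With CONFIG_NEED_MULTIPLE_NODES the pglist_data
 * itself has to be allocated first, hence the panic on failure.
 */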
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
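
/*
 * Set up boot memory for one node: allocate its bootmem bitmap (panicking
 * if that fails), release the node's active regions to the bootmem
 * allocator, and register the node's memory with sparsemem.
 */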
static void __init bootmem_init_one_node(unsigned int nid)

	unsigned long total_pages, paddr;
	unsigned long end_pfn;

		panic("Can't allocate bootmap for nid[%d]\n", nid);

	free_bootmem_with_active_regions(nid, end_pfn);
		struct memblock_region *reg;

	sparse_memory_present_with_active_regions(nid);
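
/*
 * Boot-time memory setup: walk the memblock memory regions, pick up each
 * one's PFN range, and then run bootmem_init_one_node() for each node.
 */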
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		bootmem_init_one_node(i);
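
/*
 * Early memory reservations: keep the region below CONFIG_ZERO_PAGE_OFFSET
 * (and anything else already in use) out of the allocator's hands.
 */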
static void __init early_reserve_mem(void)

	unsigned long start_pfn;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	if (CONFIG_ZERO_PAGE_OFFSET != 0)
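
/*
 * Zone and page table setup: let the machine vector reserve any
 * platform-specific memory, wipe swapper_pg_dir, and compute the zone
 * limits that get handed to free_area_init_nodes().
 */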
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
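
	/*
	 * For each online node, read its PFN bounds from the bootmem data
	 * and fold them into max_zone_pfns.
	 */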
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);

	free_area_init_nodes(max_zone_pfns);
static void __init iommu_init(void)
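
/*
 * Final accounting: add every node's pages to totalram_pages and print the
 * kernel's virtual memory layout (fixmap, vmalloc, lowmem, .init, .data,
 * .text).
 */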
	int codesize, datasize, initsize;

		unsigned long node_pages = 0;
		void *node_high_memory;

		totalram_pages += node_pages;
451 "%dk data, %dk init)\n",
459 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
460 #ifdef CONFIG_HIGHMEM
461 " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
463 " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
464 " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n"
465 #ifdef CONFIG_UNCACHED_MAPPING
466 " : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n"
468 " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
469 " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
470 " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
#ifdef CONFIG_HIGHMEM

		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING

		((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
	printk("Freeing unused kernel memory: %ldk freed\n",

#ifdef CONFIG_BLK_DEV_INITRD

	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

#ifdef CONFIG_MEMORY_HOTPLUG
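
/*
 * Memory hotplug support: translate the hotplugged physical range into
 * PFNs and register it with the core VM via __add_pages().
 */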
	unsigned long start_pfn = start >> PAGE_SHIFT;

				start_pfn, nr_pages);

		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
int memory_add_physaddr_to_nid(u64 addr)