#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#ifdef CONFIG_ZONE_DMA
unsigned long num_dma_physpages;
#endif
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)

#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
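/*
 * build_node_maps - efi_memmap_walk() callback that records, for each
 * usable memory range, the page frame span of the node it belongs to.
 */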
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;
	for_each_possible_early_cpu(cpu)
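/*
 * compute_pernodesize - total size of a node's per-node area: one percpu
 * page per early cpu on the node plus space for its pg_data_t and
 * struct ia64_node_data, rounded up to a full page.
 */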
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
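/*
 * per_cpu_node_setup - copy the static per-cpu data into the percpu pages
 * reserved for each early cpu on this node and record the resulting
 * __per_cpu_offset for each of those cpus.
 */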
static void *per_cpu_node_setup(void *cpu_data, int node)
{
	for_each_possible_early_cpu(cpu) {
		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);
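/*
 * The fragments below are from setup_per_cpu_areas(): they fill in a
 * struct pcpu_alloc_info describing the first percpu chunk (static,
 * reserved and dynamic sizes, with cpus grouped by node) and hand it to
 * pcpu_setup_first_chunk().
 */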
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;
				cpu_map[unit++] = cpu;
	static_size = __per_cpu_end - __per_cpu_start;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);
	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->alloc_size = PERCPU_PAGE_SIZE;
	for (unit = 0; unit < nr_units; unit++) {
		if (node == prev_node) {
		gi->cpu_map = &cpu_map[unit];
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;

	mem_data[node].node_data = __va(pernode);

	cpu_data = per_cpu_node_setup(cpu_data, node);
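/*
 * find_pernode_space - efi_memmap_walk() callback that looks for a range
 * on this node large enough to hold the per-node area (aligned via
 * NODEDATA_ALIGN) and, when one is found, calls fill_pernode() on it.
 */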
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;

	epfn = (start + len) >> PAGE_SHIFT;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);
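/*
 * free_node_bootmem - efi_memmap_walk() callback that returns a usable
 * range to the owning node's boot memory allocator.
 */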
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
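/*
 * reserve_pernode_space - mark each node's per-node area (and its bootmem
 * map) as reserved so the boot allocator never hands them out again.
 */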
static void __init reserve_pernode_space(void)
{
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
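/*
 * scatter_node_data - copy the global pgdat_list[] into the local
 * ia64_node_data of every node that has one, so pgdat lookups stay
 * node-local.
 */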
static void __meminit scatter_node_data(void)
{
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
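/*
 * initialize_pernode_data - fix up per-cpu and per-node pointers: every
 * cpu's ia64_cpu_info gets the node_data of the node it lives on.
 */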
static void __init initialize_pernode_data(void)
{
	for_each_possible_early_cpu(cpu) {
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}

	cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
		((char *)&ia64_cpu_info - __per_cpu_start));
	cpu0_cpu_info->node_data = mem_data[node].node_data;
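/*
 * memory_less_node_alloc - allocate the per-node area for a memoryless
 * node from the closest node (by node_distance) that does have memory.
 */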
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	int bestnode = -1, node, anynode = 0;
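/*
 * memory_less_nodes - set up per-node data for every cpu-only node by
 * borrowing memory from a nearby node.
 */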
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}
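/*
 * The fragments below are from find_memory(): it walks the EFI memory map
 * to build the node maps and claim per-node space, then initialises the
 * boot allocator before the per-node and per-cpu data is wired up.
 */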
			mem_data[node].min_pfn = ~0UL;
		unsigned long pernode, pernodesize, map;

		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;
	reserve_pernode_space();
	initialize_pernode_data();
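/*
 * The lines below are from per_cpu_init(), which records each early cpu's
 * percpu offset in local_per_cpu_offset the first time it runs.
 */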
	static int first_time = 1;

		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
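/*
 * The fragments below are from show_mem(): it walks every online pgdat
 * and tallies present, reserved, swap-cached and shared pages per node.
 */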
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;

		unsigned long present;

		pgdat_resize_lock(pgdat, &flags);

				i = vmemmap_find_next_valid_pfn(nid, i) - 1;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", nid,
		       present, reserved, shared, cached);
	unsigned long rs, re, end = start + len;
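/*
 * count_node_pages - efi_memmap_walk() callback that accumulates each
 * node's page counts (including DMA-capable pages) and min/max pfns.
 */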
static __init int count_node_pages(unsigned long start, unsigned long len,
				   int node)
{
	unsigned long end = start + len;

#ifdef CONFIG_ZONE_DMA
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
#endif
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);
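/*
 * The fragments below are from paging_init(): it counts per-node pages,
 * optionally sets up the virtual mem_map, and feeds the per-zone pfn
 * limits to free_area_init_nodes().
 */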
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
#ifdef CONFIG_VIRTUAL_MEM_MAP
	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	free_area_init_nodes(max_zone_pfns);
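/*
 * Memory hotplug hooks: allocate, free and (re)register the pg_data_t of
 * a node that is added or removed at runtime.
 */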
#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
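/* Sparse vmemmap support: populate the virtual memmap for a node. */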
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)