#include <linux/export.h>
#include <asm/sparsemem.h>
#include <asm/setup.h>
static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
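/*
 * Debug output from the dbg() macro above is switched on at boot with
 * the "numa=debug" command line argument, parsed in early_numa() below.
 */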
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
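/*
 * distance_ref_points caches the "ibm,associativity-reference-points"
 * property: the associativity levels that are significant for distance.
 * form1_affinity selects between the two encodings the platform may
 * advertise (form 1 uses the first reference point, form 0 the second);
 * see find_min_common_depth() below.
 */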
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;
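	/*
	 * Pseudo-node boundaries come from the "numa=fake=" command line
	 * option as a comma-separated, ascending list of memparse() sizes
	 * (hypothetical example: numa=fake=1G,3G splits memory at the 1GB
	 * and 3GB boundaries).  fake_nid and curr_boundary are static so
	 * successive calls keep extending the current layout.
	 */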
	if (mem < curr_boundary)
		return 0;

	/* skip commas and spaces */
	while (*p == ',' || *p == ' ' || *p == '\t')
		p++;

	dbg("created new fake_node with id %d\n", fake_nid);
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}
	dbg("adding cpu %d to node %d\n", cpu, node);
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (!prop || len < sizeof(unsigned int))
		return 0;
	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}
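	/*
	 * The resulting metric is LOCAL_DISTANCE doubled once per
	 * associativity level on which the two nodes differ; e.g. with a
	 * reference-point depth of 2 and no level in common, the reported
	 * distance is 4 * LOCAL_DISTANCE.
	 */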
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		return -1;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

	return nid;
}
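/*
 * Worked example (hypothetical device-tree values): with
 * min_common_depth == 4, an "ibm,associativity" property of <4 0 0 1 2>
 * has associativity[0] == 4 entries, and
 * associativity[min_common_depth] == 2 becomes the node id.
 */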
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
		nid = of_node_to_nid_single(device);
static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
318 "ibm,associativity-reference-points",
319 &distance_ref_points_depth);
321 if (!distance_ref_points) {
322 dbg(
"NUMA: ibm,associativity-reference-points not found.\n");
326 distance_ref_points_depth /=
sizeof(
int);
#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	if (firmware_has_feature(FW_FEATURE_OPAL))
		form1_affinity = 1;
	else {
		chosen = of_find_node_by_path("/chosen");
		if (chosen) {
			vec5 = of_get_property(chosen,
					"ibm,architecture-vec-5", NULL);
			if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
							VEC5_AFFINITY)) {
				dbg("Using form 1 affinity\n");
				form1_affinity = 1;
			}
		}
	}
	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}
		depth = distance_ref_points[1];
	}
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");
static unsigned long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
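/*
 * Example: with n == 2, the two 32-bit cells <0x1 0x0> concatenate to
 * the 64-bit value 0x100000000, the usual encoding for addresses and
 * sizes in a #address-cells/#size-cells == 2 device tree.
 */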
#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080
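/*
 * Flag bits of an ibm,dynamic-memory LMB entry: an LMB contributes
 * memory only if it is assigned to this partition and not reserved;
 * DRCONF_MEM_AI_INVALID marks an unusable associativity index.
 */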
422 drmem->
base_addr = read_n_cells(n_mem_addr_cells, cellp);
428 drmem->
flags = cp[3];
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* now that the entry count is known, revalidate the property size */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
471 if (!prop || len <
sizeof(
unsigned int))
474 return read_n_cells(n_mem_size_cells, &prop);
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory,
			"ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;
	int nid = default_nid;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	nid = of_node_to_nid_single(cpu);
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
#ifdef CONFIG_HOTPLUG_CPU
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
static inline int __init read_usm_ranges(const u32 **usm)
{
	/* read the (base, size) duple counter from the
	 * linux,drconf-usable-memory property */
	return read_n_cells(n_mem_size_cells, usm);
}
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* a linux,drconf-usable-memory property marks a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;
	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);
		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (ranges == 0)
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT), &nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
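	/*
	 * Each surviving LMB (or usable sub-range in a kexec/kdump kernel)
	 * is handed to memblock with its node id; ranges trimmed away by
	 * the mem= limit in numa_enforce_memory_limit() are skipped.
	 */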
static int __init parse_numa_properties(void)
{
	struct device_node *memory = NULL;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
		nid = of_node_to_nid_single(cpu);

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges, nid;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		nid = of_node_to_nid_single(memory);

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT),
					  &nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);
		parse_drconf_memory(memory);
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
	}
}
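/*
 * Without usable NUMA information everything lands on node 0, unless a
 * numa=fake= layout splits the memblock regions into pseudo-nodes.
 */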
	if (min_common_depth == -1 || !numa_enabled)
		return;

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
static void __init dump_numa_memory_topology(void)
{
	if (min_common_depth == -1 || !numa_enabled)
		return;
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;
	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);

	dbg("alloc_bootmem %p %lx\n", ret, size);
	.notifier_call = cpu_numa_callback,
static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;
974 start_pfn >= node_end_pfn)
977 get_node_active_region(start_pfn, &node_ar);
978 while (start_pfn < end_pfn &&
979 node_ar.start_pfn < node_ar.end_pfn) {
980 unsigned long reserve_size =
size;
985 if (end_pfn > node_ar.end_pfn)
986 reserve_size = (node_ar.end_pfn <<
PAGE_SHIFT)
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
1003 if (end_pfn <= node_ar.end_pfn)
1011 start_pfn = node_ar.end_pfn;
1013 size = size - reserve_size;
1014 get_node_active_region(start_pfn, &node_ar);
	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);

		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);

		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		/* careful_zallocation() depends on this being done first */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	register_cpu_notifier(&ppc64_numa_nb);
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
static int __init early_numa(char *p)
#ifdef CONFIG_MEMORY_HOTPLUG
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;
1140 drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1141 if (!drconf_cell_cnt)
1144 lmb_size = of_get_lmb_size(memory);
1148 rc = of_get_assoc_arrays(memory, &aa);
	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);
		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = 0;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}
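/*
 * Every LMB described by ibm,dynamic-memory is lmb_size bytes, so the
 * product above is the first address past the hot-pluggable region;
 * this calculation relies on the drconf range beginning at address 0.
 */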
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}
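/*
 * The hypervisor bumps a per-cpu counter in the lppaca whenever that
 * cpu's associativity changes; the snapshot taken above is what
 * update_cpu_associativity_changes_mask() later compares against.
 */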
static int update_cpu_associativity_changes_mask(void)
{
	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
			nr_cpus++;
		}
	}

	return nr_cpus;
}
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
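/*
 * H_HOME_NODE_ASSOCIATIVITY returns the associativity packed into six
 * 64-bit registers; VPHN_ASSOC_BUFSIZE is that data viewed as 32-bit
 * cells, plus one leading cell for the property-style length header.
 */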
static int vphn_unpack_associativity(const long *packed,
					unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;
#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed; the rest hold
			 * the reserved all-ones value, store them as-is. */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}
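/*
 * hcall_vphn() hands callers the hcall's packed return registers
 * already unpacked into a normal "ibm,associativity"-style array,
 * ready to feed to associativity_to_nid().
 */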
static long vphn_get_associativity(unsigned long cpu,
					unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);
1424 "VPHN is not supported. Disabling polling...\n");
1425 stop_topology_update();
1429 "hcall_vphn() experienced a hardware fault "
1430 "preventing VPHN. Disabling polling...\n");
1431 stop_topology_update();
	int cpu, nid, old_nid, changed = 0;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		nid = associativity_to_nid(associativity);
static void topology_timer_fn(unsigned long ignored)
{
	if (!vphn_enabled)
		return;
	if (update_cpu_associativity_changes_mask() > 0)
		topology_schedule_update();
	set_topology_timer();
}
static void set_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}
int start_topology_update(void)
{
	int rc = 0;

	/* Disabled until races with load balancing are fixed */
	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
		set_topology_timer();
		rc = 1;
	}
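	/*
	 * Once armed, the deferrable timer keeps re-arming itself from
	 * topology_timer_fn() until stop_topology_update() deletes it.
	 */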
int stop_topology_update(void)