numa.c (Linux Kernel 3.7.1)
/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
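/*
 * Parse the "numa=" early boot parameter: "numa=off" disables NUMA,
 * "numa=fake=<arg>" hands the argument to the emulation code when
 * CONFIG_NUMA_EMU is set, and "numa=noacpi" disables ACPI-based NUMA
 * detection when CONFIG_ACPI_NUMA is set.
 */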
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
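/* Map a CPU to its NUMA node via the CPU's local APIC id. */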
int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
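/*
 * Record the cpu -> node mapping.  Before the per-cpu areas exist the
 * early map is written directly; afterwards the per-cpu variable is
 * updated and the generic cpu_to_node() view is kept in sync.
 */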
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
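/*
 * Append a [start, end) memory range for node @nid to @mi after
 * sanity-checking the node id and the range.
 */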
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
                                     struct numa_meminfo *mi)
{
        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
                           nid, start, end - 1);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}
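/*
 * Remove the @idx'th memblk from @mi by shifting the remaining entries
 * down and decrementing @mi->nr_blks.
 */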
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
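/*
 * Add one memory range for node @nid to the default numa_meminfo.
 * Returns 0 on success, -errno on failure.
 */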
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        bool remapped = false;
        u64 nd_pa;
        void *nd;
        int tnid;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        /* initialize remap allocator before aligning to ZONE_ALIGN */
        init_alloc_remap(nid, start, end);

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
               nid, start, end - 1);

        /*
         * Allocate node data.  Try remap allocator first, node-local
         * memory and then any node.  Never allocate in DMA zone.
         */
        nd = alloc_remap(nid, nd_size);
        if (nd) {
                nd_pa = __pa(nd);
                remapped = true;
        } else {
                nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
                if (!nd_pa) {
                        pr_err("Cannot find %zu bytes in node %d\n",
                               nd_size, nid);
                        return;
                }
                nd = __va(nd_pa);
        }

        /* report and initialize */
        printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
               nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (!remapped && tnid != nid)
                printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
        NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

        node_set_online(nid);
}
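/*
 * Sanitize @mi: clamp all blocks to [0, max_pfn), drop empty blocks,
 * merge neighboring or overlapping blocks of the same node, and reject
 * configurations in which blocks of different nodes overlap.
 */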
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = PFN_PHYS(max_pfn);
        int i, j, k;

        /* first, trim all entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* make sure all blocks are inside the limits */
                bi->start = max(bi->start, low);
                bi->end = min(bi->end, high);

                /* and there's no empty block */
                if (bi->start >= bi->end)
                        numa_remove_memblk_from(i--, mi);
        }

        /* merge neighboring / overlapping entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        u64 start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
                                               bi->nid, bi->start, bi->end - 1,
                                               bj->nid, bj->start, bj->end - 1);
                                        return -EINVAL;
                                }
                                pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
                                           bi->nid, bi->start, bi->end - 1,
                                           bj->start, bj->end - 1);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = min(bi->start, bj->start);
                        end = max(bi->end, bj->end);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
                               bi->nid, bi->start, bi->end - 1, bj->start,
                               bj->end - 1, start, end - 1);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        /* clear unused ones */
        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
                                              const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
                if (mi->blk[i].start != mi->blk[i].end &&
                    mi->blk[i].nid != NUMA_NO_NODE)
                        node_set(mi->blk[i].nid, *nodemask);
}
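/*
 * Free the NUMA distance table and reset the bookkeeping so that the
 * next numa_set_distance() call can allocate a fresh one.
 */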
void __init numa_reset_distance(void)
{
        size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
                memblock_free(__pa(numa_distance), size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
}
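/*
 * Allocate a cnt x cnt byte matrix covering the highest parsed node id
 * from memblock and fill it with LOCAL_DISTANCE on the diagonal and
 * REMOTE_DISTANCE elsewhere.  On allocation failure, numa_distance is set
 * to a poison value so the allocation is not retried until reset.
 */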
static int __init numa_alloc_distance(void)
{
        nodemask_t nodes_parsed;
        size_t size;
        int i, j, cnt = 0;
        u64 phys;

        /* size the new table and allocate it */
        nodes_parsed = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

        for_each_node_mask(i, nodes_parsed)
                cnt = i;
        cnt++;
        size = cnt * cnt * sizeof(numa_distance[0]);

        phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                      size, PAGE_SIZE);
        if (!phys) {
                pr_warning("NUMA: Warning: can't allocate distance table!\n");
                /* don't retry until explicitly reset */
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
        memblock_reserve(phys, size);

        numa_distance = __va(phys);
        numa_distance_cnt = cnt;

        /* fill with the default distances */
        for (i = 0; i < cnt; i++)
                for (j = 0; j < cnt; j++)
                        numa_distance[i * cnt + j] = i == j ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

        return 0;
}
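/*
 * Set the distance from node @from to @to.  The distance table is
 * allocated on first use; if that allocation failed, if either node id
 * is out of range, or if the distance value does not fit the table, the
 * call is ignored with a one-time warning.
 */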
void __init numa_set_distance(int from, int to, int distance)
{
        if (!numa_distance && numa_alloc_distance() < 0)
                return;

        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
            from < 0 || to < 0) {
                pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        if ((u8)distance != distance ||
            (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        numa_distance[from * numa_distance_cnt + to] = distance;
}
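/*
 * Read a distance back out of the table; fall back to the default
 * local/remote distances for nodes the table does not cover.
 */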
int __node_distance(int from, int to)
{
        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        u64 numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                u64 s = mi->blk[i].start >> PAGE_SHIFT;
                u64 e = mi->blk[i].end >> PAGE_SHIFT;
                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((s64)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}
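/*
 * Commit the parsed NUMA layout: compute node_possible_map, tag each
 * memblock range with its node id, verify the configuration covers the
 * e820 memory map, and finally set up NODE_DATA for every node that has
 * memory.
 */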
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        unsigned long uninitialized_var(pfn_align);
        int i, nid;

        /* Account for nodes with cpus and no memory */
        node_possible_map = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&node_possible_map, mi);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *mb = &mi->blk[i];
                memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
        }

        /*
         * If sections array is gonna be used for pfn -> nid mapping, check
         * whether its granularity is fine enough.
         */
#ifdef NODE_NOT_IN_PAGE_FLAGS
        pfn_align = node_map_pfn_alignment();
        if (pfn_align && pfn_align < PAGES_PER_SECTION) {
                printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
                       PFN_PHYS(pfn_align) >> 20,
                       PFN_PHYS(PAGES_PER_SECTION) >> 20);
                return -EINVAL;
        }
#endif
        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                u64 start = PFN_PHYS(max_pfn);
                u64 end = 0;

                for (i = 0; i < mi->nr_blks; i++) {
                        if (nid != mi->blk[i].nid)
                                continue;
                        start = min(mi->blk[i].start, start);
                        end = max(mi->blk[i].end, end);
                }

                if (start < end)
                        setup_node_data(nid, start, end);
        }

        /* Dump memblock with node info and return. */
        memblock_dump_all();
        return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}
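/*
 * Reset all NUMA state, run the given detection method, clean up and
 * register the result, and clear cpu -> node links that point at nodes
 * which did not come up online.
 */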
static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
        numa_reset_distance();

        ret = init_func();
        if (ret < 0)
                return ret;
        ret = numa_cleanup_meminfo(&numa_meminfo);
        if (ret < 0)
                return ret;

        numa_emulation(&numa_meminfo, numa_distance_cnt);

        ret = numa_register_memblks(&numa_meminfo);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();
        return 0;
}
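/*
 * Fallback when no NUMA configuration is found or NUMA is disabled:
 * fake a single node spanning all of memory and always report success.
 */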
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
               0LLU, PFN_PHYS(max_pfn) - 1);

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}
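/*
 * Try each configured NUMA detection method in turn (NUMAQ, ACPI, AMD
 * northbridge) and fall back to the dummy single-node setup if none of
 * them succeeds or NUMA has been disabled on the command line.
 */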
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
                if (!numa_init(numaq_numa_init))
                        return;
#endif
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
        }

        numa_init(dummy_numa_init);
}
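/* Return the online node closest to @node according to the distance table. */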
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
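/*
 * Debug helper: add or remove @cpu in node @node's cpumask and log the
 * resulting mask.
 */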
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;
        char buf[64];

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
        return;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
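/*
 * For memory hotplug: map a physical address being added back to the NUMA
 * node whose parsed memory range contains it (defaults to the node of the
 * first recorded block).
 */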
int memory_add_physaddr_to_nid(u64 start)
{
        struct numa_meminfo *mi = &numa_meminfo;
        int nid = mi->blk[0].nid;
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].start <= start && mi->blk[i].end > start)
                        nid = mi->blk[i].nid;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif