/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
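/*
 * Lookup sketch: __nr_to_section() (include/linux/mmzone.h) resolves a
 * section number against this table roughly as
 *
 *	&mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]
 *
 * With SPARSEMEM_EXTREME the first index selects a root array that is
 * only allocated once a section in its range becomes present; in the
 * flat case the same expression is an ordinary 2-D array access.
 */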
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to do a
 * lookup in the section_to_node_table in order to find which node the
 * page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !CONFIG_SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens to also
 * work for the flat array case because NR_SECTION_ROOTS equals
 * NR_MEM_SECTIONS there.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
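/*
 * __section_nr() is the inverse of __nr_to_section(): for any present
 * section, __section_nr(__nr_to_section(nr)) == nr.  It is a linear
 * scan over the roots, which is acceptable because it is only needed
 * on slow paths such as section removal.
 */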
/*
 * During early boot, before section_mem_map points at a real mem_map,
 * it is used to store the section's NUMA node.  This saves a separate
 * data structure; the node information is cleared just before the real
 * mem_map is stored.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
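/*
 * Round trip of the early encoding: for a section made present on node
 * nid, sparse_early_nid() recovers nid because the flag bits live
 * below SECTION_NID_SHIFT:
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid)
 *					| SECTION_MARKED_PRESENT;
 *	nid == sparse_early_nid(ms);	// flags are shifted back out
 */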
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
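/*
 * Worked example (assuming x86_64 defaults of MAX_PHYSMEM_BITS == 46
 * and PAGE_SHIFT == 12): max_sparsemem_pfn = 1UL << 34, so any pfn at
 * or above 2^34 (i.e. beyond 64 TiB of physical address space) is
 * clamped and warned about here.
 */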
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
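/*
 * Usage sketch (illustrative): architectures call this from their
 * early setup for each physical memory range, before sparse_init()
 * runs, e.g.
 *
 *	memory_present(nid, start_pfn, end_pfn);
 *
 * so that every section backing the range is marked present and tagged
 * with its node before the real mem_maps are allocated.
 */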
/*
 * Only used by the i386 NUMA architectures, but relatively generic
 * code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
/*
 * Subtle: the mem_map is encoded such that the identity
 * "pfn - section_mem_map" recovers the actual physical page frame
 * number from a struct page pointer.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map,
					   unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
				   unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
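/*
 * The two helpers invert each other: decoding masks the flag bits back
 * off and re-adds the section's first pfn, so for the value actually
 * stored in section_mem_map,
 *
 *	sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum)
 *
 * yields map again.
 */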
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}
unsigned long usemap_size(void)
{
	unsigned long size_bytes;

	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
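/*
 * Worked example (assuming 128 MiB sections, 4 KiB pages and 2 MiB
 * pageblocks, i.e. 64 pageblocks per section with NR_PAGEBLOCK_BITS
 * == 4): SECTION_BLOCKFLAGS_BITS == 256, so the first roundup yields
 * 32 bytes and the second leaves it at 32, already a multiple of
 * sizeof(unsigned long).
 */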
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page being freed and making a section unremovable while other
	 * sections referencing the usemap remain active.  Similarly, a
	 * pgdat can prevent a section being removed.  If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent for memory hotremove purposes.
	 * The fix is to place the usemap on the same section as the
	 * pgdat wherever possible, hence the goal/limit window below.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					  SMP_CACHE_BYTES, goal, limit);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.  Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning, so just report
	 * the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		printk(KERN_WARNING "%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}
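/*
 * One block per node is carved into per-section usemaps above; keeping
 * them packed next to the pgdat (see the allocator before this) is
 * exactly what check_usemap_section_nr() verifies for the hot-remove
 * case.
 */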
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback: allocate section by section */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
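/*
 * Note on the strategy above: one remapped or bootmem block covering
 * the whole node is tried first so that the per-section mem_maps end
 * up physically contiguous; only if both bulk attempts fail does the
 * code fall back to allocating each section's mem_map individually.
 */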
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map for
 * each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif
	/*
	 * The mem_map uses big pages (2M on x86-64) while a usemap is
	 * tiny (tens of bytes), so interleaving the two allocations
	 * would leave a hole after every mem_map.  Allocate all the
	 * usemaps up front, via usemap_map, so the mem_maps can be laid
	 * out contiguously afterwards.  powerpc also needs to call
	 * sparse_init_one_section() right after each
	 * sparse_early_mem_map_alloc(), which is another reason to
	 * allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	/* find the first present section to seed the per-node batching */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* take care of the sections from pnum_begin to pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						usemap_count, nodeid_begin);
		/* new start, update the count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* the last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					usemap_count, nodeid_begin);
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");
	/* again, find the first present section, this time for mem_maps */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* take care of the sections from pnum_begin to pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update the count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* the last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					map_count, nodeid_begin);
#endif
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}
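/*
 * Boot-time flow (illustrative): architectures first report ranges via
 * memory_present(), then call sparse_init() once, e.g. from their
 * paging_init(), after which pfn_to_page()/page_to_pfn() work for all
 * present sections.
 */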
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
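/*
 * The order of attempts above is deliberate: a physically contiguous
 * high-order allocation is preferred (hence __GFP_NOWARN, since it may
 * well fail on a fragmented machine), with vmalloc() as the fallback;
 * __kfree_section_memmap() below uses is_vmalloc_addr() to tell the
 * two cases apart again when freeing.
 */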
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic;
	struct page *page = virt_to_page(memmap);

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * logically offlined, so all of its pages are isolated
		 * from the page allocator.  If the removing section's
		 * memmap is placed on that same section, it must not be
		 * freed here: the allocator could otherwise hand it out
		 * again just before the memory disappears physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem.  It is packed with other usemaps
	 * on the section which holds the pgdat, so just keep it as is.
	 */
	if (memmap) {
		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap, nr_pages);
	}
}
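/*
 * Teardown sketch (illustrative): on hot-remove,
 * sparse_remove_one_section() below decodes and clears the section,
 * then hands the memmap/usemap pair here; slab-backed allocations from
 * a previous hot-add are kfree'd, while bootmem-backed usemaps are
 * deliberately left in place because they are packed with other
 * sections' usemaps.
 */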
/*
 * Returns the number of sections whose mem_maps were properly set.  If
 * this is <= 0, then that means that the passed-in map was not consumed
 * and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				     int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking for this, because it does its own checks and the
	 * allocations may sleep.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * nr_pages);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
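/*
 * Usage sketch (illustrative): the memory hotplug core calls this once
 * per section being added, e.g.
 *
 *	ret = sparse_add_one_section(zone, start_pfn, nr_pages);
 *
 * A negative return (including -EEXIST for an already-present section)
 * means the section was not initialized; the memmap and usemap
 * allocated here are freed on the error path before returning.
 */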
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */