Linux Kernel 3.7.1
arch/arm64/mm/init.c
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>

#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

phys_addr_t memstart_addr __read_mostly = 0;

void __init early_init_dt_setup_initrd_arch(unsigned long start,
                                            unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}

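/*
 * Parse the "initrd=<start>,<size>" early parameter, e.g.
 * "initrd=0x84000000,16M" (illustrative values only); this records the
 * physical start and size so the initrd can be reserved and mapped later.
 */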
static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);

#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

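/*
 * Compute per-zone sizes and hole sizes for the single node: ZONE_DMA32
 * covers the pfns below 4GB when CONFIG_ZONE_DMA32 is set, ZONE_NORMAL
 * covers the rest, and gaps between memblock regions are subtracted from
 * each zone as holes.
 */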
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        struct memblock_region *reg;
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long max_dma32 = min;

        memset(zone_size, 0, sizeof(zone_size));

#ifdef CONFIG_ZONE_DMA32
        /* 4GB maximum for 32-bit only capable devices */
        max_dma32 = min(max, MAX_DMA32_PFN);
        zone_size[ZONE_DMA32] = max(min, max_dma32) - min;
#endif
        zone_size[ZONE_NORMAL] = max - max_dma32;

        memcpy(zhole_size, zone_size, sizeof(zhole_size));

        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start >= max)
                        continue;
#ifdef CONFIG_ZONE_DMA32
                if (start < max_dma32) {
                        unsigned long dma_end = min(end, max_dma32);
                        zhole_size[ZONE_DMA32] -= dma_end - start;
                }
#endif
                if (end > max_dma32) {
                        unsigned long normal_end = min(end, max);
                        unsigned long normal_start = max(start, max_dma32);
                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
                }
        }

        free_area_init_node(0, zone_size, min, zhole_size);
}

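/*
 * With CONFIG_HAVE_ARCH_PFN_VALID, a pfn is considered valid only if its
 * physical address lies inside a memblock memory region, not merely
 * inside the overall mem_map range.
 */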
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm64_memory_present(void)
{
}
#else
static void arm64_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

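/*
 * Reserve, with memblock, everything that must not be handed out as free
 * memory: the kernel image, the initrd, the initial page tables, the
 * device tree blob and the /memreserve/ entries from the DT reserve map.
 */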
void __init arm64_memblock_init(void)
{
        u64 *reserve_map, base, size;

        /* Register the kernel text, kernel data and initrd with memblock */
        memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        /*
         * Reserve the page tables. These are already in use,
         * and can only be in node 0.
         */
        memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
        memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);

        /* Reserve the dtb region */
        memblock_reserve(virt_to_phys(initial_boot_params),
                         be32_to_cpu(initial_boot_params->totalsize));

        /*
         * Process the reserve map. This will probably overlap the initrd
         * and dtb locations which are already reserved, but overlapping
         * doesn't hurt anything
         */
        reserve_map = ((void*)initial_boot_params) +
                        be32_to_cpu(initial_boot_params->off_mem_rsvmap);
        while (1) {
                base = be64_to_cpup(reserve_map++);
                size = be64_to_cpup(reserve_map++);
                if (!size)
                        break;
                memblock_reserve(base, size);
        }

        memblock_allow_resize();
        memblock_dump_all();
}

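/*
 * With all memory and reservations known to memblock, initialise
 * sparsemem, set up the zones and record the highest usable pfn.
 */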
void __init bootmem_init(void)
{
        unsigned long min, max;

        min = PFN_UP(memblock_start_of_DRAM());
        max = PFN_DOWN(memblock_end_of_DRAM());

        /*
         * Sparsemem tries to allocate bootmem in memory_present(), so must be
         * done after the fixed reservations.
         */
        arm64_memory_present();

        sparse_init();
        zone_sizes_init(min, max);

        high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
        max_pfn = max_low_pfn = max;
}

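/*
 * Release the page range [pfn, end) to the page allocator and return the
 * number of pages freed; "s" names the region in the log message.
 */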
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                pr_info("Freeing %s memory: %dK\n", s, size);

        return pages;
}

/*
 * Poison init memory with an undefined instruction (0x0).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        memset(s, 0, count);
}

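/*
 * Without a virtually mapped memmap, struct pages covering holes between
 * memory banks are never referenced; the two helpers below return that
 * unused part of the mem_map array to the boot allocator.
 */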
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and round start upwards and end
         * downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these, free the section of the
         * memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist due
                 * to SPARSEMEM sections which aren't present.
                 */
                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
                /*
                 * If we had a previous bank, and there is a space between the
                 * current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(start + __phys_to_pfn(reg->size),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif  /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        struct memblock_region *reg;

        arm64_swiotlb_init();

        max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

#ifndef CONFIG_SPARSEMEM_VMEMMAP
        /* this will put all unused low memory onto the freelists */
        free_unused_memmap();
#endif

        totalram_pages += free_all_bootmem();

        reserved_pages = free_pages = 0;

        for_each_memblock(memory, reg) {
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = __phys_to_pfn(reg->base);
                pfn2 = pfn1 + __phys_to_pfn(reg->size);

                page = pfn_to_page(pfn1);
                end = pfn_to_page(pfn2 - 1) + 1;

                do {
                        if (PageReserved(page))
                                reserved_pages++;
                        else if (!page_count(page))
                                free_pages++;
                        page++;
                } while (page < end);
        }

        /*
         * Since our memory may not be contiguous, calculate the real number
         * of pages we have in this system.
         */
        pr_info("Memory:");
        num_physpages = 0;
        for_each_memblock(memory, reg) {
                unsigned long pages = memblock_region_memory_end_pfn(reg) -
                        memblock_region_memory_base_pfn(reg);
                num_physpages += pages;
                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        pr_notice("Memory: %luk/%luk available, %luk reserved\n",
                  nr_free_pages() << (PAGE_SHIFT-10),
                  free_pages << (PAGE_SHIFT-10),
                  reserved_pages << (PAGE_SHIFT-10));

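/*
 * Helper macros for the layout dump below: each expands to a
 * (base, top, size) triple with the size scaled to KB or MB.
 */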
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        pr_notice("Virtual kernel memory layout:\n"
                  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld MB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
                  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld MB)\n"
#endif
                  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
                  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
                  "      .init : 0x%p" " - 0x%p" "   (%6ld kB)\n"
                  "      .text : 0x%p" " - 0x%p" "   (%6ld kB)\n"
                  "      .data : 0x%p" " - 0x%p" "   (%6ld kB)\n",
                  MLM(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
                  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
                      (unsigned long)virt_to_page(high_memory)),
#endif
                  MLM(MODULES_VADDR, MODULES_END),
                  MLM(PAGE_OFFSET, (unsigned long)high_memory),

                  MLK_ROUNDUP(__init_begin, __init_end),
                  MLK_ROUNDUP(_text, _etext),
                  MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can be
         * detected at build time already.
         */
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
        BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
        BUG_ON(TASK_SIZE_64 > MODULES_VADDR);

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get anywhere without
                 * overcommit, so turn it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

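/*
 * Called from the generic boot code once init has finished: poison and
 * then free the kernel's .init sections.
 */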
void free_initmem(void)
{
        poison_init_mem(__init_begin, __init_end - __init_begin);
        totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                    __phys_to_pfn(__pa(__init_end)),
                                    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

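/*
 * Free the initrd pages unless "keepinitrd" was passed on the kernel
 * command line, in which case the image is left in place.
 */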
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif