Linux Kernel 3.7.1
init.c
/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2011 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
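/*
 * Walk the kernel page tables for 'addr', allocating any missing
 * pud/pmd levels on the way, and return the pte for it, or NULL if
 * the pgd entry is absent or an intermediate allocation fails.
 */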
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

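/*
 * Install a kernel mapping of 'addr' to the physical address 'phys'
 * with protection 'prot', flush the local TLB entry, and wire the
 * entry down in the TLB when the protection includes _PAGE_WIRED.
 */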
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

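/*
 * Tear down a mapping installed by set_pte_phys(), unwiring the TLB
 * entry first if the protection marked it as wired.
 */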
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

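/*
 * Map a compile-time fixmap slot to a physical address (or unmap it
 * again via __clear_fixmap() below). The index is range-checked
 * against __end_of_fixed_addresses before the page tables are touched.
 */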
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

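/*
 * Boot-time helpers: make sure a pmd page (and, in
 * one_page_table_init() below, a pte page) exists for the given
 * upper-level entry, populating the entry if it is still empty.
 */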
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

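/*
 * Pre-populate the page table levels covering the [start, end)
 * virtual range (used for the fixmap region) so that later
 * __set_fixmap() calls only need to fill in individual ptes.
 */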
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

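/*
 * Allocate and minimally initialize the pglist_data for a node.
 * When CONFIG_NEED_MULTIPLE_NODES is not set, NODE_DATA(nid) is
 * statically allocated, so only the pfn span is filled in here.
 */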
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

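/*
 * Set up the classic bootmem allocator for one node: allocate the
 * bootmap, register the node's usable memory with it, and (for node
 * 0 only, for now) re-reserve the regions memblock already reserved.
 */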
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

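/*
 * Bootmem setup: register all memblock memory as active ranges,
 * bring node 0 online, give the platform a hook via plat_mem_setup(),
 * then initialize bootmem and sparsemem for each online node.
 */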
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

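/*
 * Reserve memory that must never reach the bootmem allocator: the
 * range from the zero page up to the end of the kernel image, any
 * pages below CONFIG_ZERO_PAGE_OFFSET, and whatever additional early
 * reservations remain.
 */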
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

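/*
 * paging_init() is the main memory bring-up path: it lets the board
 * reserve memory, sizes the memblock view of RAM, initializes
 * bootmem, clears swapper_pg_dir and points the MMU's TTB at it,
 * sets up the fixmap page tables, and finally hands the zone sizes
 * to free_area_init_nodes().
 */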
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

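/*
 * mem_init() releases the bootmem pages to the page allocator,
 * computes high_memory, clears the zero page, and prints the memory
 * banner along with the virtual kernel memory layout.
 */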
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

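/* Free the memory occupied by the kernel's __init sections. */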
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
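/* Free the pages holding the initial ramdisk once it is no longer needed. */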
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
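/*
 * Memory hotplug: add a newly plugged physical range to node 'nid'.
 * Everything goes into ZONE_NORMAL, as noted below.
 */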
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */