Linux Kernel 3.7.1
init.c
/*
 * S390 version
 * Copyright IBM Corp. 1999
 * Author(s): Hartmut Penner ([email protected])
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);

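/*
 * Allocate the empty zero page(s). Older machines get a single zero
 * page; z10 and later get a block of four, so that reads of the zero
 * page are spread over several copies. ZERO_PAGE() uses zero_page_mask
 * to pick one of the copies based on the virtual address.
 */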
static unsigned long __init setup_zero_pages(void)
{
        struct cpuid cpu_id;
        unsigned int order;
        unsigned long size;
        struct page *page;
        int i;

        get_cpu_id(&cpu_id);
        switch (cpu_id.machine) {
        case 0x9672:    /* g5 */
        case 0x2064:    /* z900 */
        case 0x2066:    /* z900 */
        case 0x2084:    /* z990 */
        case 0x2086:    /* z990 */
        case 0x2094:    /* z9-109 */
        case 0x2096:    /* z9-109 */
                order = 0;
                break;
        case 0x2097:    /* z10 */
        case 0x2098:    /* z10 */
        default:
                order = 2;
                break;
        }

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

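        /*
         * Split the higher-order allocation into single pages so that
         * each page can be marked reserved individually.
         */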
        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                SetPageReserved(page);
                page++;
        }

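        /*
         * zero_page_mask covers the page offsets within the block of
         * zero pages; ZERO_PAGE() masks the virtual address with it to
         * select one of the copies.
         */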
        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;

        return 1UL << order;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;

        init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
        if (VMALLOC_END > (1UL << 42)) {
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
#else
        asce_bits = _ASCE_TABLE_LENGTH;
        pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
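        /*
         * The kernel address space control element (ASCE) combines the
         * physical origin of the initial page table with its region type
         * and table length bits; it is loaded into control registers
         * 1, 7 and 13 below so that the primary, secondary and home
         * address spaces all use this page table.
         */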
        S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();

        /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

        atomic_set(&init_mm.context.attach_count, 1);

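        /*
         * Two zones: ZONE_DMA for memory below MAX_DMA_ADDRESS and
         * ZONE_NORMAL for everything above it, up to max_low_pfn.
         */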
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
        fault_init();
}

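/*
 * mem_init() hands the bootmem pages over to the buddy allocator,
 * reserves the zero pages and prints the memory layout.
 */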
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;

        max_mapnr = num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* Setup guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_pages();   /* Setup zeroed pages. */

        reservedpages = 0;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
               nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);
        printk("Write protected kernel read-only data: %#lx - %#lx\n",
               (unsigned long)&_stext,
               PFN_ALIGN((unsigned long)&_eshared) - 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
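/*
 * For CONFIG_DEBUG_PAGEALLOC: when pages are freed their kernel
 * mapping is invalidated so that stray accesses fault immediately;
 * when they are allocated again the writable mapping is restored.
 */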
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long address;
        int i;

        for (i = 0; i < numpages; i++) {
                address = page_to_phys(page + i);
                pgd = pgd_offset_k(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
                if (!enable) {
                        __ptep_ipte(address, pte);
                        pte_val(*pte) = _PAGE_TYPE_EMPTY;
                        continue;
                }
                *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
                /* Flush cpu write queue. */
                mb();
        }
}
#endif

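/*
 * free_init_pages() poisons the given range with POISON_FREE_INITMEM
 * and returns the pages to the page allocator.
 */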
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (begin >= end)
                return;
        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
                       PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)&__init_begin,
                        (unsigned long)&__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
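/*
 * arch_add_memory() creates the identity mapping for the new memory
 * with vmem_add_mapping() and then adds the page range to the page
 * allocator, keeping pages that fall inside existing zones there and
 * putting the remainder into ZONE_MOVABLE. The mapping is removed
 * again if adding the pages fails.
 */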
int arch_add_memory(int nid, u64 start, u64 size)
{
        unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        struct zone *zone;
        int rc;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;
        for_each_zone(zone) {
                if (zone_idx(zone) != ZONE_MOVABLE) {
                        /* Add range within existing zone limits */
                        zone_start_pfn = zone->zone_start_pfn;
                        zone_end_pfn = zone->zone_start_pfn +
                                       zone->spanned_pages;
                } else {
                        /* Add remaining range to ZONE_MOVABLE */
                        zone_start_pfn = start_pfn;
                        zone_end_pfn = start_pfn + size_pages;
                }
                if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
                        continue;
                nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
                           zone_end_pfn - start_pfn : size_pages;
                rc = __add_pages(nid, zone, start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
                if (!size_pages)
                        break;
        }
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */