Linux Kernel 3.7.1
setup.c
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
		     pfn, mfn);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

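/*
 * Release (give back to the hypervisor) or populate (claim from the
 * hypervisor) the PFN range [start, end), one frame at a time, via
 * XENMEM_decrease_reservation / XENMEM_populate_physmap. Returns the
 * number of pages actually released or populated.
 */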
static unsigned long __init xen_do_chunk(unsigned long start,
					 unsigned long end, bool release)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;
		unsigned long mfn = pfn_to_mfn(pfn);

		if (release) {
			/* Make sure pfn exists to start with */
			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
				continue;
			frame = mfn;
		} else {
			if (mfn != INVALID_P2M_ENTRY)
				continue;
			frame = pfn;
		}
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
					   &reservation);
		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
		     release ? "release" : "populate", pfn, ret);

		if (ret == 1) {
			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
				if (release)
					break;
				set_xen_guest_handle(reservation.extent_start, &frame);
				reservation.nr_extents = 1;
				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
							   &reservation);
				break;
			}
			len++;
		} else
			break;
	}
	if (len)
		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
		       release ? "Freeing" : "Populating",
		       start, end, len,
		       release ? "freed" : "added");

	return len;
}

static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	return xen_do_chunk(start, end, true);
}

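/*
 * Walk the E820 map and populate pages above max_pfn (the boundary of
 * the initial allocation) until credits_left pages have been added.
 * *last_pfn is set to the highest PFN populated; the return value is
 * the total number of pages added.
 */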
static unsigned long __init xen_populate_chunk(
	const struct e820entry *list, size_t map_size,
	unsigned long max_pfn, unsigned long *last_pfn,
	unsigned long credits_left)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;
	unsigned long dest_pfn;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;
		unsigned long pfns;
		long capacity;

		if (credits_left <= 0)
			break;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after the xen_start_info->nr_pages */
		if (e_pfn <= max_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);
		/* If the E820 falls within the nr_pages, we want to start
		 * at the nr_pages PFN.
		 * If that would mean going past the E820 entry, skip it
		 */
		if (s_pfn <= max_pfn) {
			capacity = e_pfn - max_pfn;
			dest_pfn = max_pfn;
		} else {
			capacity = e_pfn - s_pfn;
			dest_pfn = s_pfn;
		}

		if (credits_left < capacity)
			capacity = credits_left;

		pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
		done += pfns;
		*last_pfn = (dest_pfn + pfns);
		if (pfns < capacity)
			break;
		credits_left -= pfns;
	}
	return done;
}

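/*
 * Mark the PFN range [start_pfn, end_pfn) as 1:1 in the p2m, first
 * releasing any pages below nr_pages back to the hypervisor. The
 * counts of released and identity-mapped pages are accumulated into
 * *released and *identity.
 */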
static void __init xen_set_identity_and_release_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long *released, unsigned long *identity)
{
	unsigned long pfn;

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	if (start_pfn < nr_pages)
		*released += xen_release_chunk(
			start_pfn, min(end_pfn, nr_pages));

	*identity += set_phys_range_identity(start_pfn, end_pfn);
}

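/*
 * Set all non-RAM regions and gaps in the E820 map to identity type
 * PFNs, releasing the backing pages of those below nr_pages; returns
 * the number of pages released.
 */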
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				xen_set_identity_and_release_chunk(
					start_pfn, end_pfn, nr_pages,
					&released, &identity);

			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}

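/*
 * For the initial domain, query XENMEM_maximum_reservation for the
 * domain's maximum page count; in all cases clamp the result to
 * MAX_DOMAIN_PAGES.
 */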
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

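/*
 * Add a region to the kernel e820 map, trimming RAM regions inward to
 * page boundaries so no partial page is reported as usable RAM.
 */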
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

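/*
 * xen_memory_setup() below builds the kernel e820 map from the memory
 * map supplied by the hypervisor: non-RAM ranges become identity
 * mapped, released pages are repopulated above the initial allocation,
 * extra memory is clamped to EXTRA_MEM_RATIO, and the ISA region and
 * Xen start-of-day data are reserved.
 */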
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs. Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
				       max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size. On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;
	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

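/*
 * Register an entry point with the hypervisor via CALLBACKOP_register;
 * events are masked while the callback runs.
 */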
static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

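/*
 * Route sysenter through the Xen sysenter callback; if registration
 * fails, clear the CPU feature bit so that sysenter is not used.
 */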
void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

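/*
 * Register the 64-bit and compat (32-bit) syscall entry points. The
 * 64-bit callback is essential; the 32-bit one is optional and its
 * feature bit is cleared on failure.
 */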
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

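/*
 * Machine-specific boot-time setup for a Xen guest: enable VM assists,
 * register the event and failsafe callbacks, wire up the fast system
 * call entry points, disable ACPI in unprivileged domains, turn off
 * cpuidle/cpufreq, and make idle use the safe_halt() pvop.
 */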
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(set_pm_idle_to_default());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}