Linux Kernel 3.7.1
vdso32-setup.c
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>

enum {
        VDSO_DISABLED = 0,
        VDSO_ENABLED = 1,
        VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    VDSO_COMPAT
#else
#define VDSO_DEFAULT    VDSO_ENABLED
#endif

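/*
 * On 64-bit kernels the 32-bit vDSO is handled through the compat layer:
 * the enable flag is the shared sysctl_vsyscall32 variable, and the
 * arch_setup_additional_pages() defined below is built under the name
 * syscall32_setup_pages for use on the compat ELF load path.
 */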
#ifdef CONFIG_X86_64
#define vdso_enabled                    sysctl_vsyscall32
#define arch_setup_additional_pages     syscall32_setup_pages
#endif

/*
 * This is the difference between the prelinked addresses in the vDSO images
 * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
 * in the user address space.
 */
#define VDSO_ADDR_ADJUST        (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        return 1;
}

/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif
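/*
 * For example (values as in the enum above): booting with "vdso32=0"
 * disables the 32-bit vDSO entirely, "vdso32=1" maps it at a regular
 * per-process mmap address (the default unless CONFIG_COMPAT_VDSO is set),
 * and "vdso32=2" uses the legacy fixed VDSO_HIGH_BASE address.
 */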
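/*
 * Shift the st_value of every defined object/function/section/file symbol
 * in a symbol table by VDSO_ADDR_ADJUST; undefined and absolute symbols
 * are left alone.
 */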
static __init void reloc_symtab(Elf32_Ehdr *ehdr,
                                unsigned offset, unsigned size)
{
        Elf32_Sym *sym = (void *)ehdr + offset;
        unsigned nsym = size / sizeof(*sym);
        unsigned i;

        for (i = 0; i < nsym; i++, sym++) {
                if (sym->st_shndx == SHN_UNDEF ||
                    sym->st_shndx == SHN_ABS)
                        continue;  /* skip */

                if (sym->st_shndx > SHN_LORESERVE) {
                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
                               sym->st_shndx);
                        continue;
                }

                switch (ELF_ST_TYPE(sym->st_info)) {
                case STT_OBJECT:
                case STT_FUNC:
                case STT_SECTION:
                case STT_FILE:
                        sym->st_value += VDSO_ADDR_ADJUST;
                }
        }
}
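/*
 * Walk the .dynamic section and add VDSO_ADDR_ADJUST to every entry whose
 * d_un field holds an address rather than a plain value, so the dynamic
 * information still points into the relocated image.
 */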
static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
        Elf32_Dyn *dyn = (void *)ehdr + offset;

        for (; dyn->d_tag != DT_NULL; dyn++)
                switch (dyn->d_tag) {
                case DT_PLTGOT:
                case DT_HASH:
                case DT_STRTAB:
                case DT_SYMTAB:
                case DT_RELA:
                case DT_INIT:
                case DT_FINI:
                case DT_REL:
                case DT_DEBUG:
                case DT_JMPREL:
                case DT_VERSYM:
                case DT_VERDEF:
                case DT_VERNEED:
                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
                        /* definitely pointers needing relocation */
                        dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_ENCODING ... OLD_DT_LOOS-1:
                case DT_LOOS ... DT_HIOS-1:
                        /* Tags above DT_ENCODING are pointers if
                           they're even */
                        if (dyn->d_tag >= DT_ENCODING &&
                            (dyn->d_tag & 1) == 0)
                                dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_VERDEFNUM:
                case DT_VERNEEDNUM:
                case DT_FLAGS_1:
                case DT_RELACOUNT:
                case DT_RELCOUNT:
                case DT_VALRNGLO ... DT_VALRNGHI:
                        /* definitely not pointers */
                        break;

                case OLD_DT_LOOS ... DT_LOOS-1:
                case DT_HIOS ... DT_VALRNGLO-1:
                default:
                        if (dyn->d_tag > DT_ENCODING)
                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
                                       dyn->d_tag);
                        break;
                }
}
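/*
 * Relocate the prelinked vDSO image in place so it can live at
 * VDSO_HIGH_BASE: adjust the ELF entry point, the program and section
 * headers, and the dynamic and symbol tables they reference.
 */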
static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
        Elf32_Phdr *phdr;
        Elf32_Shdr *shdr;
        int i;

        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
               !elf_check_arch_ia32(ehdr) ||
               ehdr->e_type != ET_DYN);

        ehdr->e_entry += VDSO_ADDR_ADJUST;

        /* rebase phdrs */
        phdr = (void *)ehdr + ehdr->e_phoff;
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr[i].p_vaddr += VDSO_ADDR_ADJUST;

                /* relocate dynamic stuff */
                if (phdr[i].p_type == PT_DYNAMIC)
                        reloc_dyn(ehdr, phdr[i].p_offset);
        }

        /* rebase sections */
        shdr = (void *)ehdr + ehdr->e_shoff;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!(shdr[i].sh_flags & SHF_ALLOC))
                        continue;

                shdr[i].sh_addr += VDSO_ADDR_ADJUST;

                if (shdr[i].sh_type == SHT_SYMTAB ||
                    shdr[i].sh_type == SHT_DYNSYM)
                        reloc_symtab(ehdr, shdr[i].sh_offset,
                                     shdr[i].sh_size);
        }
}

static struct page *vdso32_pages[1];

#ifdef CONFIG_X86_64

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))

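/*
 * Program the MSRs that direct 32-bit compat system calls on this CPU:
 * the SYSENTER entry state and the SYSCALL (CSTAR) entry point.
 */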
/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

        wrmsrl(MSR_CSTAR, ia32_cstar_target);
}

#define compat_uses_vma         1

static inline void map_compat_vdso(int map)
{
}

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall()        (0)

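/*
 * Enable the SYSENTER fast system call path on this CPU (32-bit kernels):
 * point the SYSENTER MSRs at the kernel code segment, the per-CPU TSS
 * stack and ia32_sysenter_target.  CPUs without X86_FEATURE_SEP are left
 * untouched.
 */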
void enable_sep_cpu(void)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
                return;
        }

        tss->x86_tss.ss1 = __KERNEL_CS;
        tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
        put_cpu();
}
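/*
 * A synthetic VMA describing the fixmap-based compat vDSO page, reported
 * to the rest of the kernel through get_gate_vma() below.
 */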
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}

#define compat_uses_vma         0

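/*
 * Map or unmap the compat vDSO fixmap page system-wide.  The mapping is
 * toggled lazily the first time an exec needs the opposite state, and a
 * global TLB flush clears any stale translations.
 */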
static void map_compat_vdso(int map)
{
        static int vdso_mapped;

        if (map == vdso_mapped)
                return;

        vdso_mapped = map;

        __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
                     map ? PAGE_READONLY_EXEC : PAGE_NONE);

        /* flush stray tlbs */
        flush_tlb_all();
}

#endif  /* CONFIG_X86_64 */

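/*
 * Boot-time setup: copy the vDSO image that matches this CPU's fast
 * system call capability (syscall, sysenter or plain int80) into a fresh
 * page and run relocate_vdso() on the copy.
 */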
int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        vdso32_pages[0] = virt_to_page(syscall_page);

#ifdef CONFIG_X86_32
        gate_vma_init();
#endif

        if (vdso32_syscall()) {
                vsyscall = &vdso32_syscall_start;
                vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
        } else if (vdso32_sysenter()) {
                vsyscall = &vdso32_sysenter_start;
                vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
        } else {
                vsyscall = &vdso32_int80_start;
                vsyscall_len = &vdso32_int80_end - &vdso32_int80_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}
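/*
 * In compat mode the page is already visible at the fixed VDSO_HIGH_BASE
 * fixmap address; otherwise a free address is picked with
 * get_unmapped_area() and the page is installed as a special mapping.
 */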
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        bool compat;

#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32))
                return x32_setup_additional_pages(bprm, uses_interp);
#endif

        if (vdso_enabled == VDSO_DISABLED)
                return 0;

        down_write(&mm->mmap_sem);

        /* Test compat mode once here, in case someone
           changes it via sysctl */
        compat = (vdso_enabled == VDSO_COMPAT);

        map_compat_vdso(compat);

        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }
        }

        current->mm->context.vdso = (void *)addr;

        if (compat_uses_vma || !compat) {
                /*
                 * MAYWRITE to allow gdb to COW and set breakpoints
                 */
                ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                              VM_READ|VM_EXEC|
                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                              vdso32_pages);

                if (ret)
                        goto up_fail;
        }

        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);

        return ret;
}

#ifdef CONFIG_X86_64

subsys_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

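/*
 * The table below shows up as /proc/sys/abi/vsyscall32; for example,
 * writing 0 there disables the 32-bit vDSO for subsequently exec'd
 * processes.
 */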
static ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &sysctl_vsyscall32,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {}
};

static ctl_table abi_root_table2[] = {
        {
                .procname = "abi",
                .mode = 0555,
                .child = abi_table2
        },
        {}
};

static __init int ia32_binfmt_init(void)
{
        register_sysctl_table(abi_root_table2);
        return 0;
}
__initcall(ia32_binfmt_init);
#endif

#else  /* CONFIG_X86_32 */

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

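/*
 * Report the compat vDSO fixmap page as this mm's "gate" VMA.  Only tasks
 * whose vdso pointer is VDSO_HIGH_BASE (i.e. created with the compat vDSO)
 * have one.
 */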
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        /*
         * Check to see if the corresponding task was created in compat vdso
         * mode.
         */
        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
                return &gate_vma;
        return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        const struct vm_area_struct *vma = get_gate_vma(mm);

        return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

#endif  /* CONFIG_X86_64 */