#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/vdso_datapage.h>

static unsigned long vdso_pages;
static struct page **vdso_pagelist;
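/*
 * AArch32 compat support: populate a single "vectors" page with the kuser
 * helpers and the sigreturn trampoline copied in below.
 */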
static int alloc_vectors_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	/* kuser helpers go at the top of the vectors page */
	memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
	       kuser_sz);

	/* sigreturn trampoline */
	memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
	       __aarch32_sigret_code_start, sigret_sz);
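/*
 * Map the vectors page at the fixed AArch32 vectors base in a compat
 * task's address space and remember it in the mm context.
 */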
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = AARCH32_VECTORS_BASE;

	current->mm->context.vdso = (void *)addr;

				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
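/*
 * One-time initialisation: report the vDSO size and build the page list
 * holding the vDSO code pages plus one extra slot for the data page.
 */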
static int __init vdso_init(void)
{
	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
		vdso_pages + 1, vdso_pages, 1L, &vdso_start);

	vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
				GFP_KERNEL);
	if (vdso_pagelist == NULL) {
		pr_err("Failed to allocate vDSO pagelist!\n");
	for (i = 0; i < vdso_pages; i++) {
		ClearPageReserved(pg);
		vdso_pagelist[i] = pg;
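	/*
	 * Sanity check: map the pages just collected and make sure the
	 * image starts with the ELF magic before exposing it to userspace.
	 */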
	vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
	if (vbase == NULL) {
		pr_err("Failed to map vDSO pagelist!\n");
	} else if (memcmp(vbase, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");

	/* The extra slot at the end of the pagelist holds the vDSO data page. */
	vdso_pagelist[i] = pg;
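/*
 * Per-process setup: reserve an area large enough for the vDSO code pages
 * plus the data page and install it into the new mm.
 */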
	unsigned long vdso_base, vdso_mapping_len;

	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;

				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
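	/*
	 * A compat task reuses mm->context.vdso for its vectors page, so
	 * tell the two apart by the fixed AArch32 vectors base address.
	 */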
	if (vma->vm_start == AARCH32_VECTORS_BASE)
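/*
 * Timekeeping updates: tb_seq_count is bumped to an odd value before the
 * vDSO data page is touched and back to an even value once the update is
 * complete, so userspace readers can detect and retry a torn read.
 */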
	++vdso_data->tb_seq_count;

	vdso_data->use_syscall = use_syscall;
	vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
	vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;

	vdso_data->cs_cycle_last = tk->clock->cycle_last;
	vdso_data->cs_mult = tk->mult;
	vdso_data->cs_shift = tk->shift;

	++vdso_data->tb_seq_count;

	++vdso_data->tb_seq_count;

	++vdso_data->tb_seq_count;
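/*
 * Illustrative reader side of the protocol above (a sketch only; the real
 * userspace reader is the vDSO fast path, not this code). "seq", "sec" and
 * "nsec" are local variables of the hypothetical reader:
 *
 *	do {
 *		seq = vdso_data->tb_seq_count;
 *		smp_rmb();
 *		sec  = vdso_data->xtime_coarse_sec;
 *		nsec = vdso_data->xtime_coarse_nsec;
 *		smp_rmb();
 *	} while ((seq & 1) || seq != vdso_data->tb_seq_count);
 */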