#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/syscalls.h>
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus
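
/*
 * 8- and 16-bit views of the saved vm86 registers: AL/AH alias the low
 * bytes of pt.ax, IP/SP the low words of pt.ip and pt.sp.
 */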
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
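
/*
 * Virtual flags of the vm86 task: VEFLAGS is the full 32-bit copy kept in
 * thread.v86flags, VFLAGS its low 16 bits.
 */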
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)
#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))
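
/*
 * SAFE_MASK: the EFLAGS bits a vm86 program may modify directly; IF, IOPL
 * and NT stay under kernel control (IF is virtualized via VIF).
 * RETURN_MASK: the bits taken from the real flags when the virtual flag
 * image is read back in get_vflags().
 */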
#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
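
/*
 * The userspace vm86_regs layout is not bit-for-bit identical to
 * kernel_vm86_regs, so the register image is copied out in two pieces:
 * everything up to pt.orig_ax, then the tail from pt.orig_ax onward.
 */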
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));
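
/*
 * The inbound copy mirrors the outbound one; judging by the trailing '+'
 * and the offsetof() arithmetic at the call sites below, it also copies a
 * caller-supplied number of extra bytes so the fields following the
 * register image are filled in the same call.
 */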
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
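
/*
 * When vm86 mode is left, the register image is written back to the
 * user's vm86_info structure; a missing pointer or a faulting copy means
 * the task cannot be resumed sanely.
 */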
	if (!current->thread.vm86_info) {
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
		pr_alert("could not access userspace vm86_info\n");
	load_sp0(tss, &current->thread);
	ret->fs = current->thread.saved_fs;
	set_user_gs(ret, current->thread.saved_gs);
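
/*
 * Write-protect the PTEs that map the legacy VGA window at 0xA0000 so
 * that screen writes from vm86 mode fault and can be tracked through the
 * screen bitmap (VM86_SCREEN_BITMAP).
 */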
static void mark_screen_rdonly(struct mm_struct *mm)
	if (pgd_none_or_clear_bad(pgd))
	if (pud_none_or_clear_bad(pud))
	if (pmd_none_or_clear_bad(pmd))
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
	pte_unmap_unlock(pte, ptl);
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
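
/*
 * The kernel_vm86_struct is declared at the top of the syscall's stack
 * frame and stays there for as long as the task runs in vm86 mode:
 * do_sys_vm86() points thread.sp0 into it and never returns here.
 */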
	struct kernel_vm86_struct info;
	if (tsk->thread.saved_sp0)
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
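
/*
 * The extended vm86plus entry point: cmd either selects one of the IRQ
 * passing subfunctions handled by do_vm86_irq_handling(), or requests a
 * plain entry into vm86 mode with is_vm86pus set.
 */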
	struct kernel_vm86_struct info;
		ret = do_vm86_irq_handling(cmd, (int)arg);
	if (tsk->thread.saved_sp0)
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
	info.vm86plus.is_vm86pus = 1;
	do_sys_vm86(&info, tsk);
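
/*
 * do_sys_vm86() sanitizes the user-supplied register image, saves the
 * 32-bit state in the thread struct, points the kernel stack (sp0) into
 * the kernel_vm86_struct and drops into vm86 mode via the userspace
 * resume path.  It does not return to its caller.
 */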
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
	info->regs.pt.gs = 0;
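	/*
	 * The flag image is sanitized as well: the non-SAFE_MASK bits (IF,
	 * IOPL, NT, ...) are inherited from the 32-bit caller's flags and
	 * the VM bit is forced on before entering vm86 mode.
	 */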
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	switch (info->cpu_type) {
	tsk->thread.saved_fs = info->regs32->fs;
	tsk->thread.saved_gs = get_user_gs(info->regs32);
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	tsk->thread.screen_bitmap = info->screen_bitmap;
	mark_screen_rdonly(tsk->mm);
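	/*
	 * This path never goes through the normal syscall exit, so the
	 * audit exit hook is called by hand before jumping to the
	 * userspace resume code with the vm86 register image in place.
	 */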
#ifdef CONFIG_AUDITSYSCALL
#ifdef CONFIG_X86_32_LAZY_GS
		"jmp resume_userspace"
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp resume_userspace"
static inline void set_IF(struct kernel_vm86_regs *regs)
static inline void clear_IF(struct kernel_vm86_regs *regs)
static inline void clear_TF(struct kernel_vm86_regs *regs)
static inline void clear_AC(struct kernel_vm86_regs *regs)
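
/*
 * set_vflags_long()/set_vflags_short() fold a flag value written by the
 * vm86 program back into VEFLAGS and the SAFE_MASK bits of the real
 * flags; get_vflags() builds the flag image shown to the program, with
 * the virtual IF folded in.
 */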
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
	unsigned long flags = regs->pt.flags & RETURN_MASK;
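
/*
 * is_revectored(): test bit nr of a revectored-interrupt bitmap; the
 * btl/sbbl pair below turns the tested bit into an all-zeroes or
 * all-ones result.
 */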
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
#define val_byte(val, n) (((__u8 *)&val)[n])
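
/*
 * Real-mode stack access helpers: pushb/pushw/pushl and popb/popw/popl
 * move data one byte at a time through put_user()/get_user() on the vm86
 * program's stack in user memory, jumping to err_label on a fault.
 */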
#define pushb(base, ptr, val, err_label) \
		if (put_user(__val, base + ptr) < 0) \

#define pushw(base, ptr, val, err_label) \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \

#define pushl(base, ptr, val, err_label) \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \

#define popb(base, ptr, err_label) \
		if (get_user(__res, base + ptr) < 0) \

#define popw(base, ptr, err_label) \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \

#define popl(base, ptr, err_label) \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
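
/*
 * do_int() emulates delivery of a real-mode software interrupt: unless
 * the vector is revectored or points into the BIOS segment, FLAGS, CS and
 * IP are pushed on the vm86 stack and execution vectors through the
 * real-mode IVT entry at linear address i*4; otherwise the event is
 * reported back to the 32-bit monitor as VM86_INTx.
 */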
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	if (is_revectored(i, &KVM86->int_revectored))
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
	intr_ptr = (unsigned long __user *) (i << 2);
	if ((segoffs >> 16) == BIOSSEG)
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	IP(regs) = segoffs & 0xffff;
	return_to_32bit(regs, VM86_INTx + (i << 8));
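
/*
 * Traps taken in vm86 mode: with vm86plus active, debug and breakpoint
 * traps (1 and 3) are reported to the 32-bit monitor and everything else
 * is reflected into the vm86 program through do_int(); otherwise the trap
 * number is recorded in thread.trap_nr and handled the normal way.
 */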
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
	current->thread.trap_nr = trapno;
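
/*
 * handle_vm86_fault() is the general protection fault path: it decodes
 * the faulting instruction's prefixes and emulates the privileged
 * instructions a vm86 program relies on (pushf/popf, int xx, iret,
 * cli/sti), working directly on the program's code and stack in user
 * memory.
 */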
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
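
/*
 * CHECK_IF_IN_TRAP re-arms the trap flag when the vm86plus debugger has a
 * single-step pending; VM86_FAULT_RETURN either bounces back to the
 * 32-bit monitor (pending PIC interrupt, or TF was set on entry) or lets
 * the vm86 program continue.
 */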
#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	orig_flags = *(unsigned short *)&regs->pt.flags;
	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
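	/*
	 * Fetch and skip instruction prefixes: 0x66 flips the operand size
	 * for the emulation below, any non-prefix byte terminates the loop
	 * as the opcode to emulate.
	 */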
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66: data32 = 1; break;
		default: pref_done = 1;
		}
	} while (!pref_done);
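	/* pushf: push the virtual flag image, 32- or 16-bit wide. */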
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
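	/* popf: pop a new flag image and feed it to the vflag helpers. */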
		unsigned long newflags;
			newflags = popl(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			set_vflags_long(newflags, regs);
			set_vflags_short(newflags, regs);
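		/*
		 * int xx: if the vm86plus debugger has hooked this vector,
		 * report it to the monitor; otherwise dispatch it through
		 * do_int() like a real-mode software interrupt.
		 */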
		int intno = popb(csp, ip, simulate_sigsegv);
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
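		/*
		 * iret: pop IP, CS and FLAGS (32- or 16-bit according to
		 * the operand-size prefix) and fold the flags back through
		 * the vflag helpers.
		 */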
		unsigned long newflags;
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			set_vflags_long(newflags, regs);
			set_vflags_short(newflags, regs);
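
/*
 * ---------------- vm86 special IRQ passing stuff ----------------
 *
 * A vm86 task can have a hardware interrupt "passed" to it: the handler
 * below only marks the IRQ as pending in irqbits (optionally sending a
 * signal to the owning task), and the task later collects it through the
 * vm86() IRQ subfunctions.
 */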
#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {

#define ALLOWED_SIGS (1 \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
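
/*
 * The interrupt handler: mark the IRQ pending and, if the owning task
 * registered a notification signal, send it.  An IRQ that is already
 * pending or no longer owned by anyone is ignored.
 */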
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	spin_unlock_irqrestore(&irqbits_lock, flags);
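
/*
 * free_vm86_irq(): drop the task binding for the IRQ and clear any
 * pending bit under irqbits_lock.
 */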
static inline void free_vm86_irq(int irqnumber)
	vm86_irqs[irqnumber].tsk = NULL;
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
		if (vm86_irqs[i].tsk == task)
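
/*
 * get_and_reset_irq(): only the task that registered the IRQ may poll it;
 * the pending bit is sampled and cleared under irqbits_lock.
 */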
static inline int get_and_reset_irq(int irqnumber)
	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	bit = irqbits & (1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
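
/*
 * Dispatch the vm86() IRQ subfunctions.  For VM86_REQUEST_IRQ the
 * argument packs the notification signal in the high byte and the IRQ
 * number in the low byte; registering an IRQ that is invalid or already
 * owned fails with -EPERM.
 */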
static int do_vm86_irq_handling(int subfunction, int irqnumber)
	switch (subfunction) {
		return get_and_reset_irq(irqnumber);
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		vm86_irqs[irq].sig = sig;
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		free_vm86_irq(irqnumber);