#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
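/*
 * regoffset_table maps register names to their byte offsets in struct
 * pt_regs; the lookup loops below walk it until they hit the
 * REG_OFFSET_END sentinel (name == NULL).
 */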
	for (roff = regoffset_table; roff->name != NULL; roff++)

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
static const int arg_offs_table[] = {
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))
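/*
 * FLAG_MASK_32 is the set of EFLAGS bits a ptracer may change in the
 * tracee; set_flags() applies it as roughly
 *	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
 * so every other flag bit is preserved.
 */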
static inline bool invalid_selector(u16 value)

#define FLAG_MASK		FLAG_MASK_32
	unsigned long sp = (unsigned long)&regs->sp;

	if (tinfo->previous_esp)
		return tinfo->previous_esp;

	return (unsigned long)regs;
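/*
 * kernel_stack_pointer(): on 32-bit, sp and ss are not saved in pt_regs
 * when the CPU traps while already in kernel mode, so the best available
 * stack pointer is &regs->sp itself, thread_info->previous_esp, or,
 * failing both, the address of the pt_regs block.
 */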
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)

	return &regs->bx + (regno >> 2);
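/*
 * regno is the byte offset of a register within struct user_regs_struct
 * (which matches pt_regs here); bx is the first pt_regs member on 32-bit,
 * so regno >> 2 turns the offset into an index from &regs->bx.
 */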
	retval = task_user_gs(task);

static int set_segment_reg(struct task_struct *task,

	if (invalid_selector(value))

	task_user_gs(task) = value;
#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)

	return &regs->r15 + (offset / sizeof(regs->r15));
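/*
 * The 64-bit variant takes the same byte offset; pt_regs begins with r15
 * here, so dividing by sizeof(regs->r15) gives the index from &regs->r15.
 */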
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)

		asm("movl %%fs,%0" : "=r" (seg));

		return task->thread.fsindex;

		asm("movl %%gs,%0" : "=r" (seg));

		return task->thread.gsindex;

		asm("movl %%ds,%0" : "=r" (seg));

		asm("movl %%es,%0" : "=r" (seg));
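/*
 * fs, gs, ds and es are not saved in pt_regs on x86-64, so get_segment_reg()
 * reads the live selector (the asm above) only when the task is current,
 * and otherwise returns the value cached in the thread struct
 * (thread.fsindex/gsindex, thread.ds/es).
 */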
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)

	if (invalid_selector(value))

		loadsegment(fs, task->thread.fsindex);

		load_gs_index(task->thread.gsindex);
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))

#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))

static unsigned long get_flags(struct task_struct *task)
		  unsigned long offset, unsigned long value)

		return set_segment_reg(child, offset, value);

		if (child->thread.fs != value)

		if (child->thread.gs != value)

	return get_segment_reg(task, offset);
		unsigned int seg = task->thread.fsindex;

			asm("movl %%fs,%0" : "=r" (seg));

		unsigned int seg = task->thread.gsindex;

			asm("movl %%gs,%0" : "=r" (seg));
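/*
 * fs_base/gs_base need extra care in getreg(): do_arch_prctl() may have
 * used a GDT slot instead of the MSR, so the base is thread.fs/gs when
 * non-zero, and otherwise comes from the TLS descriptor selected by the
 * fsindex/gsindex value read above.
 */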
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)

		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);

		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)

		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);

		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {

			ret = putreg(target, pos, word);
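/*
 * The genregs get/set helpers transfer one register-sized word per loop
 * iteration from either a kernel buffer (kbuf) or a user buffer (ubuf),
 * advancing pos and count as they go; getreg()/putreg() do the
 * per-register validation and access.
 */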
			      struct perf_sample_data *data,

	for (i = 0; i < HBP_NUM; i++) {

static unsigned long ptrace_get_dr7(struct perf_event *bp[])

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);

	int gen_len, gen_type;

	attr.bp_len = gen_len;
	attr.bp_type = gen_type;
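/*
 * Hardware debug registers are backed by per-slot perf events (HBP_NUM
 * slots).  ptrace_get_dr7() rebuilds a DR7 value from the enabled
 * breakpoints, and the length/type pair is translated between the DR7
 * encoding and the generic attr.bp_len/attr.bp_type encoding used by the
 * perf hw_breakpoint layer.
 */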
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)

	unsigned long old_dr7;
	int i, orig_ret = 0, rc = 0;

	if (ptrace_get_breakpoints(tsk) < 0)

	for (i = 0; i < HBP_NUM; i++) {

			rc = ptrace_modify_breakpoint(bp, len, type,

		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);

	ptrace_put_breakpoints(tsk);

	return ((orig_ret < 0) ? orig_ret : rc);
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)

	unsigned long val = 0;

	if (ptrace_get_breakpoints(tsk) < 0)

		val = bp->hw.info.address;

	ptrace_put_breakpoints(tsk);
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,

	if (ptrace_get_breakpoints(tsk) < 0)

	ptrace_breakpoint_init(&attr);

	ptrace_put_breakpoints(tsk);

	if (n == 4 || n == 5)

		rc = ptrace_set_breakpoint_addr(tsk, n, val);

		rc = ptrace_write_dr7(tsk, val);
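/*
 * ptrace_set_debugreg(): writes to DR0-DR3 go through
 * ptrace_set_breakpoint_addr(), DR4/DR5 (n == 4 || n == 5) are rejected
 * since those registers do not exist, and a DR7 write is decoded by
 * ptrace_write_dr7().  ptrace_get_breakpoints()/ptrace_put_breakpoints()
 * bracket each update of the breakpoint slots.
 */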
static int ioperm_active(struct task_struct *target,

	return target->thread.io_bitmap_max / regset->size;

		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)

	if (!target->thread.io_bitmap_ptr)

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
#ifdef TIF_SYSCALL_EMU

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

		 unsigned long addr, unsigned long data)

	unsigned long __user *datap = (unsigned long __user *)data;

		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))

			tmp = getreg(child, addr);

			tmp = ptrace_get_debugreg(child, addr / sizeof(data));

		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))

			ret = putreg(child, addr, data);

						  addr / sizeof(data), data);
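/*
 * PTRACE_PEEKUSER/POKEUSER: addr is a word-aligned byte offset into
 * struct user.  Register offsets are served by getreg()/putreg(),
 * u_debugreg[] offsets by ptrace_get_debugreg()/ptrace_set_debugreg().
 * A debugger would issue something like this (userspace sketch, not part
 * of this file, x86-64 field names):
 *	ptrace(PTRACE_PEEKUSER, pid, offsetof(struct user, regs.rip), 0);
 */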
		return copy_regset_to_user(child,

		return copy_regset_from_user(child,

		return copy_regset_to_user(child,

		return copy_regset_from_user(child,

		return copy_regset_to_user(child, &user_x86_32_view,

		return copy_regset_from_user(child, &user_x86_32_view,

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

	case PTRACE_ARCH_PRCTL:

#ifdef CONFIG_IA32_EMULATION
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
			offsetof(struct user_regs_struct, rs),		\

static int putreg32(struct task_struct *child, unsigned regno, u32 value)

		regs->orig_ax = value;

	if (regno > sizeof(struct user32) || (regno & 3))
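/*
 * Compat path: the R32()/SEG32() helpers map offsets within the 32-bit
 * struct user32 onto the corresponding 64-bit pt_regs fields or segment
 * registers, and putreg32()/getreg32() reject offsets beyond struct
 * user32 or not 4-byte aligned.
 */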
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
			offsetof(struct user_regs_struct, rs));		\

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)

	R32(orig_eax, orig_ax);

		*val = get_flags(child);

		*val = ptrace_get_debugreg(child, regno / 4);

	if (regno > sizeof(struct user32) || (regno & 3))
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)

		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);

		while (count >= sizeof(*u)) {

			getreg32(target, pos, &word);

			count -= sizeof(*u);

			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)

		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);

		while (count >= sizeof(*u) && !ret) {

			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,

	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);

		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||

			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&

			tmp = ptrace_get_debugreg(child, addr / sizeof(data));

		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||

			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&

						  addr / sizeof(data), data);
		return copy_regset_to_user(child,

		return copy_regset_from_user(child,

		return copy_regset_to_user(child,

		return copy_regset_from_user(child,

	return compat_ptrace_request(child, request, addr, data);
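/*
 * x32 tasks use the 64-bit register layout but compat (32-bit) pointer
 * arguments, so caddr/cdata are widened to unsigned long and compat_ptr()
 * forms the user pointer; requests not handled above fall through to
 * compat_ptrace_request().
 */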
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);

#ifdef CONFIG_X86_X32_ABI
	if (!is_ia32_task())
		return x32_arch_ptrace(child, request, caddr, cdata);

		ret = getreg32(child, addr, &val);

		ret = putreg32(child, addr, data);

		return copy_regset_to_user(child, &user_x86_32_view,

		return copy_regset_from_user(child, &user_x86_32_view,

		return copy_regset_to_user(child, &user_x86_32_view,

		return copy_regset_from_user(

		return copy_regset_to_user(child, &user_x86_32_view,

		return copy_regset_from_user(child, &user_x86_32_view,

	return compat_ptrace_request(child, request, addr, data);
#ifdef CONFIG_X86_64

		.get = genregs_get, .set = genregs_set

		.size = sizeof(long), .align = sizeof(long),

		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get

	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {

		.size = sizeof(u32), .align = sizeof(u32),

		.size = sizeof(u32), .align = sizeof(u32),

		.size = sizeof(u32), .align = sizeof(u32),

		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get

	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
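/*
 * Each user_regset entry describes one register class (general registers,
 * FP state, TLS on 32-bit, I/O permission bitmap) with its element size,
 * alignment and get/set callbacks; the user_regset_view ties the array to
 * an ELF machine type and is what core dumps and the
 * PTRACE_GETREGSET/SETREGSET requests consume.
 */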
#ifdef CONFIG_X86_64

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	return &user_x86_32_view;

#ifdef CONFIG_X86_64
	return &user_x86_64_view;
static void fill_sigtrap_info(struct task_struct *tsk,

	memset(info, 0, sizeof(*info));

	info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;

	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);

			  int error_code, int si_code)

	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
#ifdef CONFIG_X86_32

#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()

	if (secure_computing(regs->orig_ax)) {

	    tracehook_report_syscall_entry(regs))

		trace_sys_enter(regs, regs->orig_ax);

				    regs->dx, regs->si);
#ifdef CONFIG_X86_64

				    regs->dx, regs->r10);

	return ret ?: regs->orig_ax;

	audit_syscall_exit(regs);

		trace_sys_exit(regs, regs->ax);

	tracehook_report_syscall_exit(regs, step);
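/*
 * Syscall entry tracing order: seccomp first (secure_computing() on the
 * syscall number in orig_ax), then the ptrace stop via
 * tracehook_report_syscall_entry(), the sys_enter tracepoint, and finally
 * the audit hook with the syscall arguments.  The value returned
 * (ret ?: regs->orig_ax) is what the entry code dispatches on, which lets
 * the tracing path suppress or redirect the call.  On exit, audit and the
 * sys_exit tracepoint run before tracehook_report_syscall_exit().
 */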