#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
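
/*
 * __recover_optprobed_insn(): if addr lies inside a region whose head was
 * overwritten by an optimized (jump) kprobe, rebuild the original
 * instruction bytes into buf from the probe's saved copy.
 */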
	struct optimized_kprobe *op;

	if (kp && kprobe_optimized(kp)) {
		op = container_of(kp, struct optimized_kprobe, kp);
		/* If op->list is not empty, op is being (un)optimized. */
		if (list_empty(&op->list))
			goto found;
	}
	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs,
		       RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
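
/*
 * Note: copied_insn holds the bytes that followed the breakpoint byte at
 * kp->addr, which is why the offset math above subtracts one.
 */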
	/* Immediate operand of the just-emitted "mov $val, %arg1". */
	*(unsigned long *)addr = val;
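
/*
 * Template for the detour buffer.  Each prepared probe gets a copy of
 * this template: it saves registers, loads the optimized_kprobe pointer
 * as the first argument (patched in at optprobe_template_val), calls the
 * C callback (patched in at optprobe_template_call), then restores
 * registers.  The relocated original instructions follow the template.
 */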
94 ".global optprobe_template_entry\n"
95 "optprobe_template_entry:\n"
102 ".global optprobe_template_val\n"
103 "optprobe_template_val:\n"
106 ".global optprobe_template_call\n"
107 "optprobe_template_call:\n"
110 " movq 144(%rsp), %rdx\n"
111 " movq %rdx, 152(%rsp)\n"
120 ".global optprobe_template_val\n"
121 "optprobe_template_val:\n"
123 ".global optprobe_template_call\n"
124 "optprobe_template_call:\n"
130 ".global optprobe_template_end\n"
131 "optprobe_template_end:\n");
#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)
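
/*
 * Byte offsets of the patchable sites inside the template: a per-probe
 * copy is patched at TMPL_MOVE_IDX (load the op pointer) and at
 * TMPL_CALL_IDX (call optimized_callback); TMPL_END_IDX is where the
 * relocated original instructions begin.
 */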

#define INT3_SIZE sizeof(kprobe_opcode_t)
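
/* Optimized kprobe callback: reached by the call in the detour buffer. */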
	/* This is possible if op is under delayed unoptimizing. */
	if (kprobe_disabled(&op->kp))
		return;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		regs->orig_ax = ~0UL;
		opt_pre_handler(&op->kp, regs);
	}
	/* The range must not be reserved by ftrace, alternatives or jump labels. */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;
	return ((insn->opcode.bytes[0] == 0xff &&
		 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) ||	/* jmp */
		insn->opcode.bytes[0] == 0xea);				/* ljmp */
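
/*
 * Indirect and far jumps cannot be resolved statically, so any function
 * containing one is rejected: the jump might land inside the bytes that
 * the optimized probe overwrites.
 */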
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
	switch (insn->opcode.bytes[0]) {
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80)	/* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70)	/* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
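
/*
 * A target inside [start, start + len] means some thread could resume in
 * the middle of the relative jump that replaces the original code, so
 * such functions must stay unoptimized.
 */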
	/* Do not optimize in the entry code due to its unstable stack handling. */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr < (unsigned long)__entry_text_end))
		return 0;
	while (addr < paddr - offset + size) {
		/* Recover the real address for the decoded instruction. */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
	/* No other enabled kprobe may live inside the optimized region. */
	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
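
/*
 * The check above tells the kprobes core whether addr falls inside the
 * instruction bytes this optprobe has taken over.
 */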
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		/* dirty != 0 defers slot reuse until it is safe. */
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;
	/* The detour buffer must be within rel32 range of the probe. */
	if (abs(rel) > 0x7fffffff)
		return -ERANGE;

	buf = (u8 *)op->optinsn.insn;
	/* Relocate the probed instructions to just after the template. */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;
	/* Jump back to the first original instruction after the copied ones. */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);
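
/*
 * Resulting layout of the detour slot:
 *
 *   buf .. buf + TMPL_END_IDX - 1          template copy (patched)
 *   buf + TMPL_END_IDX .. + optinsn.size   relocated original insns
 *   then                                   rel32 jump back to
 *                                          kp.addr + optinsn.size
 */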
#define MAX_OPTIMIZE_PROBES 256

static struct text_poke_param *jump_poke_params;
static struct jump_poke_buffer {
	u8 buf[RELATIVEJUMP_SIZE];
} *jump_poke_bufs;

static void setup_optimize_kprobe(struct text_poke_param *tprm,
				  u8 *insn_buf,
				  struct optimized_kprobe *op)
	s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

	/* Back up the bytes that the jump displacement will overwrite. */
	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
	       RELATIVE_ADDR_SIZE);

	insn_buf[0] = RELATIVEJUMP_OPCODE;
	*(s32 *)(&insn_buf[1]) = rel;

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		/* Fill in one poke parameter per probe. */
		setup_optimize_kprobe(&jump_poke_params[c],
				      jump_poke_bufs[c].buf, op);
		list_del_init(&op->list);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}
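
/*
 * The parameters collected above are applied as one text_poke_smp_batch()
 * call, so up to MAX_OPTIMIZE_PROBES probes flip from int3 to jump in a
 * single synchronization cycle.
 */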
static void setup_unoptimize_kprobe(struct text_poke_param *tprm,
				    u8 *insn_buf,
				    struct optimized_kprobe *op)
{
	/* Put int3 back as the first byte, followed by the saved bytes. */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
}
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		setup_unoptimize_kprobe(&jump_poke_params[c],
					jump_poke_bufs[c].buf, op);
		list_move(&op->list, done_list);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}
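
/*
 * Unoptimized probes are moved to done_list so the caller can finish
 * their state transition once the batched text poke has completed.
 */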
	struct optimized_kprobe *op;

	/* Detour through the copied instructions instead of single-stepping. */
	regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
	if (!reenter)
		reset_current_kprobe();
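
/*
 * The detour buffer already ends with a jump back to the original code,
 * so no int3 single-step is needed; unless this is a reentrant hit, the
 * per-CPU kprobe state can be dropped before resuming in the buffer.
 */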
	/* Allocate the per-batch instruction buffers and poke parameters. */
	jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
				 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
	if (!jump_poke_bufs)
		return -ENOMEM;

	jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
				   MAX_OPTIMIZE_PROBES, GFP_KERNEL);
	if (!jump_poke_params) {
		kfree(jump_poke_bufs);
		jump_poke_bufs = NULL;
		return -ENOMEM;
	}

	return 0;
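
/*
 * Both arrays are allocated once here and reused for every batch, so the
 * optimize/unoptimize paths never allocate at probe-arming time.
 */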