Linux Kernel 3.7.1
kprobes.c
/*
 * Kernel Probes (KProbes)
 * arch/mips/kernel/kprobes.c
 *
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Some portions copied from the powerpc version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/inst.h>

static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};
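
/*
 * Note: the two distinct break codes let the trap path tell the two
 * events apart. breakpoint_insn (BRK_KPROBE_BP) overwrites the probed
 * instruction and signals a probe hit (DIE_BREAK), while
 * breakpoint2_insn (BRK_KPROBE_SSTEPBP) terminates the out-of-line
 * single-step slot and signals its completion (DIE_SSTEPBP).
 */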

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	switch (insn.i_format.opcode) {

	/*
	 * This group contains jr and jalr, which use the r_format
	 * encoding.
	 */
	case spec_op:
		switch (insn.r_format.func) {
		case jr_op:
		case jalr_op:
			break;
		default:
			goto insn_ok;
		}

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:

	/*
	 * These are unconditional and in j_format.
	 */
	case jal_op:
	case j_op:

	/*
	 * These are conditional and in i_format.
	 */
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op:
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:

	/*
	 * These are the FPA/cp1 branch instructions.
	 */
	case cop1_op:

#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
	case ldc2_op: /* This is bbit032 on Octeon */
	case swc2_op: /* This is bbit1 on Octeon */
	case sdc2_op: /* This is bbit132 on Octeon */
#endif
		return 1;
	default:
		break;
	}
insn_ok:
	return 0;
}
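
/*
 * Background for the check above: on MIPS, the instruction after a
 * branch or jump (the delay slot) is executed before the branch takes
 * effect, e.g.
 *
 *	beq	a0, zero, target
 *	addiu	sp, sp, -32	# delay slot, runs either way
 *
 * so a branch and its delay slot must be handled as a pair when a
 * probe is placed on the branch.
 */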

/*
 * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
 * one; putting a breakpoint on top of an atomic ll/sc pair is a bad
 * idea, so we need to prevent it and refuse kprobe insertion for such
 * instructions; we cannot do much about a breakpoint in the middle of
 * an ll/sc pair; it is up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}
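
/*
 * Illustration (not from this file): a typical MIPS atomic sequence
 * retries until the store-conditional succeeds:
 *
 *	1:	ll	t0, 0(a0)	# load-linked, sets LLbit
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# fails if LLbit was cleared
 *		beqz	t0, 1b		# retry on failure
 *
 * Taking the break exception for a probe (and returning via eret)
 * clears the LLbit, so the sc would fail every time and the loop could
 * never make progress; hence probes on ll/sc are refused.
 */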

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if ((probe_kernel_read(&prev_insn, p->addr - 1,
				sizeof(mips_instruction)) == 0) &&
			insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delayslot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a
	 * branch instruction, we need to execute the instruction at
	 * the Branch Delayslot (BD) at the time of the probe hit. As
	 * MIPS also doesn't have single stepping support, the BD
	 * instruction cannot be executed in-line; it is executed on
	 * the SSOL slot using a normal breakpoint instruction in the
	 * next slot. So, read the instruction and save it for later
	 * execution.
	 */
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}
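
/*
 * Usage sketch (illustrative, not part of this file): the arch hooks in
 * this file are driven by the generic kprobes core, which a module
 * invokes roughly like this; the names below are hypothetical example
 * code, not an API defined here:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, epc = %lx\n", regs->cp0_epc);
 *		return 0;		// let the core single-step
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);	// ends up in arch_prepare_kprobe()
 */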

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	free_insn_slot(p->ainsn.insn, 0);
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}
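
/*
 * Note on the SR bookkeeping above: only the ST0_IE (interrupt enable)
 * bit of cp0_status is saved. prepare_singlestep() clears ST0_IE so no
 * interrupt can arrive while the copied instruction runs in the
 * single-step slot; post_kprobe_handler() ORs the saved bit back into
 * cp0_status once the step has completed.
 */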

/*
 * evaluate_branch_instruction:
 *
 * Evaluate the branch instruction at the probed address during a probe
 * hit: let __compute_return_epc_for_insn() compute the post-branch epc,
 * record it in kcb->target_epc, and set SKIP_DELAYSLOT in kcb->flags
 * when the copied delay-slot instruction is a nop or when the branch is
 * reported as BRANCH_LIKELY_TAKEN, so that the caller can skip the
 * single-step slot entirely.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
					struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
	force_sig(SIGBUS, current);
	return -EFAULT;

}

static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0) {
			pr_notice("Kprobes: Error in evaluating branch\n");
			return;
		}
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}
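
/*
 * Layout of the single-step-out-of-line (SSOL) slot that the code above
 * redirects epc to (prepared in arch_prepare_kprobe()):
 *
 *	ainsn.insn[0]	copy of the probed instruction (or of its
 *			delay-slot instruction if a branch was probed)
 *	ainsn.insn[1]	break with BRK_KPROBE_SSTEPBP, which traps and
 *			hands control to post_kprobe_handler()
 */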

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "break 0"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap. In the case of branch instructions, the target
 * epc is restored.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode))
		regs->cp0_epc = kcb->target_epc;
	else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;
		regs->cp0_epc = orig_epc + 4;
	}
}
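
/*
 * Worked example for the non-branch path: a probe on the instruction at
 * 0x80001000 records that address in kprobe_saved_epc at probe hit;
 * once the copied instruction has been stepped in the SSOL slot,
 * execution resumes at 0x80001000 + 4, the instruction following the
 * probed one. (The address is illustrative only.)
 */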

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check that we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else {
			if (addr->word != breakpoint_insn.word) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;

}
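
/*
 * Summary of the flow above: a BRK_KPROBE_BP trap lands here via
 * kprobe_exceptions_notify(); the pre_handler runs, the probed (or
 * delay-slot) instruction is stepped in the SSOL slot with status
 * KPROBE_HIT_SS, and the BRK_KPROBE_SSTEPBP trap that follows is
 * handled by post_kprobe_handler() below, which runs the post_handler
 * and resumes the original instruction stream.
 */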

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->regs[29];

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->cp0_epc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	/* Assembler quirk necessitates this '0,code' business. */
	asm volatile(
		"break 0,%0\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n"
		: : "n" (BRK_KPROBE_BP) : "memory");
}
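
/*
 * Usage sketch (illustrative, not part of this file): a jprobe hands
 * control to an entry handler with the same prototype as the probed
 * function, and the handler must finish with jprobe_return(). Names
 * below are hypothetical example code:
 *
 *	static long my_entry(unsigned long clone_flags, ...)
 *	{
 *		pr_info("clone_flags = %lx\n", clone_flags);
 *		jprobe_return();	// mandatory; never returns
 *		return 0;		// not reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = my_entry,
 *		.kp.symbol_name = "do_fork",
 *	};
 *
 *	register_jprobe(&my_jp);	// drives setjmp_pre_handler()
 */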

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->cp0_epc >= (unsigned long)jprobe_return &&
	    regs->cp0_epc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 * - init_kprobes() establishes a probepoint here
 * - When the probed function returns, this probe causes the
 *   handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}
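
/*
 * Usage sketch (illustrative, not part of this file): a kretprobe's
 * handler fires on function return, after the trampoline above has been
 * substituted for the real return address in $ra. Names here are
 * hypothetical example code:
 *
 *	static int my_ret(struct kretprobe_instance *ri,
 *			  struct pt_regs *regs)
 *	{
 *		pr_info("returned %ld\n", regs->regs[2]);	// $v0
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret,
 *		.kp.symbol_name = "do_fork",
 *	};
 *
 *	register_kretprobe(&my_rp);
 */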

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 * - instances are always inserted at the head of the list
	 * - when multiple return probes are registered for the same
	 *   function, the first instance's ret_addr will point to the
	 *   real return address, and all the rest will point to
	 *   kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}