#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/sysctl.h>

#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
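
/*
 * Architectures that do not provide their own kprobe_lookup_name() fall
 * back to resolving the symbol through kallsyms_lookup_name().
 */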
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
static int kprobes_initialized;
static bool kprobes_all_disarmed;
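
/* Return the raw spinlock that protects kretprobe hash bucket 'hash'. */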
	return &(kretprobe_table_locks[hash].lock);
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT

struct kprobe_insn_page {

#define KPROBE_INSN_PAGE_SIZE(slots)	\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	(sizeof(char) * (slots)))

struct kprobe_insn_cache {

static int slots_per_page(struct kprobe_insn_cache *c)

enum kprobe_slot_state {

static struct kprobe_insn_cache kprobe_insn_slots = {

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
	struct kprobe_insn_page *kip;

		if (kip->nused < slots_per_page(c)) {
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					return kip->insns + (i * c->insn_size);

			kip->nused = slots_per_page(c);

	if (c->nr_garbage && collect_garbage_slots(c) == 0)

	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	list_add(&kip->list, &c->pages);

	ret = __get_insn_slot(&kprobe_insn_slots);
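
/* Reclaim one slot; returns 1 when this empties and frees the whole page. */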
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
	kip->slot_used[idx] = SLOT_CLEAN;
	if (kip->nused == 0) {
		if (!list_is_singular(&kip->list)) {
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
	struct kprobe_insn_page *kip, *next;

		if (kip->ngarbage == 0)
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
	struct kprobe_insn_page *kip;

		long idx = ((long)slot - (long)kip->insns) /
			   (c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
				kip->slot_used[idx] = SLOT_DIRTY;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
				collect_one_slot(kip, idx);

	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
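
/* Optimized probes keep their larger detour buffers in a separate cache. */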
#ifdef CONFIG_OPTPROBES

static struct kprobe_insn_cache kprobe_optinsn_slots = {

	ret = __get_insn_slot(&kprobe_optinsn_slots);

	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
static inline void set_kprobe_instance(struct kprobe *kp)

static inline void reset_kprobe_instance(void)

	hlist_for_each_entry_rcu(p, node, head, hlist) {

static inline int kprobe_aggrprobe(struct kprobe *p)

static inline int kprobe_unused(struct kprobe *p)
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);

static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
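
/*
 * Jump optimization (optprobes): where the architecture supports it, the
 * breakpoint is replaced with a jump into a detour buffer so the probe
 * fires without taking a trap.
 */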
#ifdef CONFIG_OPTPROBES

static bool kprobes_allow_optimization;

	list_for_each_entry_rcu(kp, &p->list, list) {
			set_kprobe_instance(kp);
		reset_kprobe_instance();

	struct optimized_kprobe *op;

static inline int kprobe_optready(struct kprobe *p)
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		return arch_prepared_optinsn(&op->optinsn);

	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	return kprobe_disabled(p) && list_empty(&op->list);

	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		if (!list_empty(&op->list))

	struct optimized_kprobe *op;

	if (p && kprobe_optready(p)) {

#define OPTIMIZE_DELAY 5
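
/*
 * Optimization and unoptimization are batched: the optimizer work is
 * kicked after OPTIMIZE_DELAY jiffies so probes registered close together
 * are handled in one pass.
 */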
static __kprobes void do_optimize_kprobes(void)
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))

	struct optimized_kprobe *op, *tmp;

	if (list_empty(&unoptimizing_list))
		if (kprobe_disabled(&op->kp))
		if (kprobe_unused(&op->kp)) {
			hlist_del_rcu(&op->kp.hlist);
			list_del_init(&op->list);

	struct optimized_kprobe *op, *tmp;
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);

static __kprobes void kick_kprobe_optimizer(void)

	do_unoptimize_kprobes(&free_list);
	do_optimize_kprobes();
	do_free_cleaned_kprobes(&free_list);

	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
	struct optimized_kprobe *op;

	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))

	if (!list_empty(&op->list))
		list_del_init(&op->list);
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();

static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
	if (kprobe_disabled(&op->kp))

	struct optimized_kprobe *op;

	if (!kprobe_optimized(p)) {
		if (force && !list_empty(&op->list)) {
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);

	if (!list_empty(&op->list)) {
		list_del_init(&op->list);
		force_unoptimize_kprobe(op);
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
static void reuse_unused_kprobe(struct kprobe *ap)
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	BUG_ON(!kprobe_optready(ap));
	struct optimized_kprobe *op;

	if (!list_empty(&op->list))
		list_del_init(&op->list);

	struct optimized_kprobe *op;

	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;

	struct optimized_kprobe *op;

	if (kprobe_ftrace(p))

	ap = alloc_aggr_kprobe(p);

	if (!arch_prepared_optinsn(&op->optinsn)) {

	init_aggr_kprobe(ap, p);
static void __kprobes optimize_all_kprobes(void)
	if (kprobes_allow_optimization)
	kprobes_allow_optimization = true;
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))

static void __kprobes unoptimize_all_kprobes(void)
	if (!kprobes_allow_optimization)
	kprobes_allow_optimization = false;
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))

int sysctl_kprobes_optimization;
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
		if (sysctl_kprobes_optimization)
			optimize_all_kprobes();
			unoptimize_all_kprobes();
	_p = get_optimized_kprobe((unsigned long)p->addr);

	if (!kprobe_queued(p)) {
		_p = get_optimized_kprobe((unsigned long)p->addr);

#define optimize_kprobe(p) do {} while (0)
#define unoptimize_kprobe(p, f) do {} while (0)
#define kill_optimized_kprobe(p) do {} while (0)
#define prepare_optimized_kprobe(p) do {} while (0)
#define try_to_optimize_kprobe(p) do {} while (0)
#define __arm_kprobe(p) arch_arm_kprobe(p)
#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
#define kprobe_disarmed(p) kprobe_disabled(p)
#define wait_for_kprobe_optimizer() do {} while (0)
static void reuse_unused_kprobe(struct kprobe *ap)
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
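
/*
 * Ftrace-based kprobes: when the probed address is an ftrace call site,
 * the probe is armed through the ftrace ops below instead of a breakpoint.
 */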
#ifdef KPROBES_CAN_USE_FTRACE
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,

static int kprobe_ftrace_enabled;

	if (!kprobe_ftrace(p))
	return arch_prepare_kprobe_ftrace(p);

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);

#define prepare_kprobe(p) arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p) do {} while (0)
#define disarm_kprobe_ftrace(p) do {} while (0)
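
/*
 * Aggregate probes: when several kprobes share one address, their handlers
 * are chained on a list and invoked in turn by these wrappers.
 */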
	list_for_each_entry_rcu(kp, &p->list, list) {
			set_kprobe_instance(kp);
		reset_kprobe_instance();

					unsigned long flags)
	list_for_each_entry_rcu(kp, &p->list, list) {
			set_kprobe_instance(kp);
			reset_kprobe_instance();

	reset_kprobe_instance();

	if (!kprobe_aggrprobe(p)) {
		list_for_each_entry_rcu(kp, &p->list, list)

	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
		hlist_add_head(&ri->hlist, head);
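
/*
 * Kretprobe instances are kept in a hash table indexed by the probed task
 * and protected by the per-bucket table locks taken by the helpers below.
 */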
			struct hlist_head **head, unsigned long *flags)
	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)

			unsigned long *flags)
	hlist_lock = kretprobe_table_lock_ptr(hash);

static void __kprobes kretprobe_table_unlock(unsigned long hash,
	unsigned long *flags)

	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))

	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	kretprobe_table_unlock(hash, &flags);
		hlist_del(&ri->hlist);

static inline void free_rp_inst(struct kretprobe *rp)
		hlist_del(&ri->hlist);

		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		kretprobe_table_unlock(hash, &flags);
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	list_add_tail_rcu(&p->list, &ap->list);

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	struct kprobe *ap = orig_p;

	if (!kprobe_aggrprobe(orig_p)) {
		ap = alloc_aggr_kprobe(orig_p);
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {

	ret = add_new_kprobe(ap, p);

	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		if (!kprobes_all_disarmed)
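
/*
 * Address checks: probes may not land in the kprobes text section, on
 * blacklisted symbols, or on otherwise reserved text.
 */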
static int __kprobes in_kprobes_functions(unsigned long addr)
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)

	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {

	struct kprobe *ap, *list_p;
		list_for_each_entry_rcu(list_p, &ap->list, list)

	if (__get_valid_kprobe(p))

				       struct module **probed_mod)
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
#ifdef KPROBES_CAN_USE_FTRACE
		if ((unsigned long)p->addr != ftrace_addr)

	    in_kprobes_functions((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {

		if (unlikely(!try_module_get(*probed_mod))) {

		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);

	jump_label_unlock();
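
/*
 * register_kprobe(): resolve and validate the address, chain onto an
 * existing aggregate probe if the address is already probed, otherwise
 * insert into the hash table and arm the breakpoint.
 */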
	struct module *probed_mod;

	addr = kprobe_addr(p);
		return PTR_ERR(addr);

	ret = check_kprobe_rereg(p);

	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);

		ret = register_aggr_kprobe(old_p, p);

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))

		module_put(probed_mod);

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))

	orig_p = __get_valid_kprobe(p);

	if (!kprobe_disabled(p)) {
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			disarm_kprobe(orig_p, true);

	struct kprobe *ap, *list_p;

	ap = __disable_kprobe(p);

	WARN_ON(!kprobe_aggrprobe(ap));

		list_for_each_entry_rcu(list_p, &ap->list, list) {

		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)

	hlist_del_rcu(&ap->hlist);

	if (list_empty(&p->list))
	else if (list_is_singular(&p->list)) {
		free_aggr_kprobe(ap);

	for (i = 0; i < num; i++) {

	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
	for (i = 0; i < num; i++)
		__unregister_kprobe_bottom(kps[i]);
	.priority = 0x7fffffff

	return (unsigned long)entry;

	for (i = 0; i < num; i++) {

	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
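
/*
 * Kretprobes: the pre-handler swaps the function's return address with a
 * trampoline and saves the original in a per-call instance, so the
 * return handler can run when the function returns.
 */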
#ifdef CONFIG_KRETPROBES
	unsigned long hash, flags = 0;

		hlist_del(&ri->hlist);

		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);

		addr = kprobe_addr(&rp->kp);
			return PTR_ERR(addr);

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

#ifdef CONFIG_PREEMPT

		INIT_HLIST_NODE(&inst->hlist);
	for (i = 0; i < num; i++) {

	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);

	if (kprobe_aggrprobe(p)) {
		list_for_each_entry_rcu(kp, &p->list, list)

	if (__disable_kprobe(kp) == NULL)

	p = __get_valid_kprobe(kp);

	if (kprobe_gone(kp)) {

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
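
/*
 * Module notifier: when a module is unloaded (or its init text is freed),
 * any kprobes placed inside it are marked gone and disarmed.
 */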
					     unsigned long val, void *data)
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)

		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {

	.notifier_call = kprobes_module_callback,

static int __init init_kprobes(void)
	const char *symbol_name;

	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
				&size, &offset, &modname, namebuf);

			printk("kretprobe: lookup failed: %s\n",

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	kprobes_allow_optimization = true;

	kprobes_all_disarmed = false;

	kprobes_initialized = (err == 0);
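
/*
 * Debugfs interface: <debugfs>/kprobes/list dumps the registered probes
 * and <debugfs>/kprobes/enabled toggles global arming.
 */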
#ifdef CONFIG_DEBUG_FS
		const char *sym, int offset, char *modname, struct kprobe *pp)

			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));

		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;

	if (*pos >= KPROBE_TABLE_SIZE)

	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;

	head = &kprobe_table[i];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
			report_probe(pi, p, sym, offset, modname, NULL);

	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr

	return seq_open(filp, &kprobes_seq_ops);

	.open = kprobes_open,
static void __kprobes arm_all_kprobes(void)
	if (!kprobes_all_disarmed)
		goto already_enabled;

		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))

	kprobes_all_disarmed = false;

static void __kprobes disarm_all_kprobes(void)
	if (kprobes_all_disarmed) {

	kprobes_all_disarmed = true;

		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
				disarm_kprobe(p, false);
			       char __user *user_buf, size_t count, loff_t *ppos)
	if (!kprobes_all_disarmed)

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
	buf_size = min(count, (sizeof(buf)-1));

		disarm_all_kprobes();

	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,

static int __kprobes debugfs_kprobe_init(void)
	unsigned int value = 1;

				   &debugfs_kprobes_operations);