#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)
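/*
 * In get_kmmio_probe(): walk the RCU-protected probe list looking for a
 * probe that covers addr. The caller must hold the RCU read lock.
 */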
list_for_each_entry_rcu(p, &kmmio_probes, list) {
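/*
 * In get_kmmio_fault_page(): look the page up in its hash bucket, again
 * under the RCU read lock.
 */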
head = kmmio_page_list(page);
list_for_each_entry_rcu(f, head, list) {
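/* In clear_page_presence(): only 4k and 2M page table levels are handled. */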
pr_err("unexpected page level 0x%x.\n", level);
pr_warn("double-arm: page 0x%08lx, ref %d, old %d\n",
	f->page, f->count, !!f->old_presence);
ret = clear_page_presence(f, true);
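/* In disarm_kmmio_fault_page(): restore the presence state saved at arm time. */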
int ret = clear_page_presence(f, false);
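/*
 * kmmio_handler() disables preemption and takes the RCU read lock before
 * this lookup; a miss means the fault was not caused by kmmio.
 */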
faultpage = get_kmmio_fault_page(addr);
if (addr == ctx->addr) {
	pr_debug("secondary hit for 0x%08lx CPU %d.\n",
		addr, smp_processor_id());
	if (!faultpage->old_presence)
		pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
			addr, smp_processor_id());
} else {
	pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
		smp_processor_id(), addr);
	disarm_kmmio_fault_page(faultpage);
}
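/*
 * Record the fault page and matching probe in this CPU's context so the
 * debug trap handler can complete the access.
 */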
ctx->fpage = faultpage;
ctx->probe = get_kmmio_probe(addr);

if (ctx->probe && ctx->probe->pre_handler)
	ctx->probe->pre_handler(ctx->probe, regs, addr);
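/* Re-set the present bit so the faulting instruction can be single-stepped. */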
disarm_kmmio_fault_page(ctx->fpage);
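/*
 * post_kmmio_handler() runs from the debug trap with interrupts disabled
 * throughout; it must always be called as the pair to kmmio_handler().
 */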
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
pr_warn("unexpected debug trap on CPU %d.\n", smp_processor_id());
if (ctx->probe && ctx->probe->post_handler)
	ctx->probe->post_handler(ctx->probe, condition, regs);
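/*
 * Take kmmio_lock to avoid racing release_kmmio_fault_page(); re-arm the
 * page only if it still has users.
 */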
spin_lock(&kmmio_lock);
if (ctx->fpage->count)
	arm_kmmio_fault_page(ctx->fpage);
spin_unlock(&kmmio_lock);
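/* add_kmmio_fault_page(): the caller must hold kmmio_lock. */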
static int add_kmmio_fault_page(unsigned long page)
f = get_kmmio_fault_page(page);
if (f) {
	if (!f->count)
		arm_kmmio_fault_page(f);
	f->count++;
	return 0;
}
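/* A page not yet tracked gets a freshly allocated kmmio_fault_page, armed here. */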
if (arm_kmmio_fault_page(f)) {
	kfree(f);
	return -1;
}

list_add_rcu(&f->list, kmmio_page_list(f->page));
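/*
 * release_kmmio_fault_page(): the caller must hold kmmio_lock. Pages are
 * not freed here; they are chained onto release_list for RCU-deferred
 * freeing.
 */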
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
f = get_kmmio_fault_page(page);
if (f && !--f->count)
	disarm_kmmio_fault_page(f);
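/* register_kmmio_probe(): arm every page the probe spans, under kmmio_lock. */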
unsigned long size = 0;
if (get_kmmio_probe(p->addr)) {
	ret = -EEXIST;
	goto out;
}
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
	if (add_kmmio_fault_page(p->addr + size))
		pr_err("Unable to set page fault.\n");
	size += PAGE_SIZE;
}
spin_unlock_irqrestore(&kmmio_lock, flags);
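/*
 * Usage sketch (illustrative only; my_hits, my_pre, my_probe and my_attach
 * are hypothetical names): a probe covers the range [addr, addr + len) and
 * its handlers run around every access to it. struct kmmio_probe and
 * register_kmmio_probe() come from <linux/mmiotrace.h>.
 */
static unsigned long my_hits;

static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
			unsigned long addr)
{
	/* Runs in fault context; real handlers record the access instead. */
	my_hits++;
}

static struct kmmio_probe my_probe = {
	.len = PAGE_SIZE,
	.pre_handler = my_pre,
};

static int my_attach(void __iomem *iomem)
{
	my_probe.addr = (unsigned long)iomem;
	return register_kmmio_probe(&my_probe);
}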
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
static void remove_kmmio_fault_pages(struct rcu_head *head)
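/*
 * First grace period elapsed: unlink pages that stayed unused; a second
 * call_rcu() to rcu_free_kmmio_fault_pages() does the actual kfree().
 */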
list_del_rcu(&f->list);
spin_unlock_irqrestore(&kmmio_lock, flags);
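/*
 * unregister_kmmio_probe(): disarm every page the probe spans and collect
 * them on a release list; the caller must synchronize_rcu() afterwards to
 * be sure the callbacks will not fire anymore.
 */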
unsigned long size = 0;
while (size < size_lim) {
	release_kmmio_fault_page(p->addr + size, &release_list);
	size += PAGE_SIZE;
}
list_del_rcu(&p->list);
spin_unlock_irqrestore(&kmmio_lock, flags);
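/* Without a delayed-release descriptor the collected pages can never be freed. */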
pr_crit("leaking kmmio_fault_page objects.\n");
call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
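/*
 * kmmio_die_notifier(): the address of dr6 is passed through the die_args
 * err field, so recover the pointer with ERR_PTR(). Clearing DR_STEP tells
 * the trap code the single step was consumed.
 */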
unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);
if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
	*dr6_p &= ~DR_STEP;
	return NOTIFY_STOP;
}
.notifier_call = kmmio_die_notifier
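/* kmmio_init(): initialize the hash buckets and register the die notifier. */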
INIT_LIST_HEAD(&kmmio_page_table[i]);
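/* kmmio_cleanup(): a non-empty bucket means a probe was never unregistered. */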
WARN_ONCE(!list_empty(&kmmio_page_table[i]),
	KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");