#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/ptrace.h>	/* user_enable_single_step */
#include "../../mm/internal.h"	/* munlock_vma_page */
#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
#define UPROBES_HASH_SZ	13
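/*
 * Register/unregister and mmap/munmap paths are serialized on separate
 * mutex hash tables, keyed by the probed file's inode.
 */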
#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
/* Have a copy of the original instruction */
#define UPROBE_COPY_INSN	0
/* Don't run handlers while first register / last unregister is in progress */
#define UPROBE_RUN_HANDLER	1
/* Can skip singlestep */
#define UPROBE_SKIP_SSTEP	2
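/*
 * valid_vma: verify that @vma is an executable, file-backed mapping.
 * Restrictions are relaxed while unregistering: vm_flags may have
 * changed after the breakpoint was installed.
 */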
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
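/*
 * __replace_page - replace @page in @vma by @kpage.
 * Based on replace_page() in mm/ksm.c.
 *
 * Returns 0 on success, a negative error on failure.
 */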
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
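/*
 * verify_opcode: check what the bytes at @vaddr currently hold before
 * writing @new_opcode, so racing register/unregister calls and an
 * already-present breakpoint are handled gracefully.
 */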
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
	copy_opcode(page, vaddr, &old_opcode);
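/*
 * write_opcode - write the opcode at a given virtual address.
 * The probed process gets a private copy of the page: the old page is
 * copied into a freshly allocated anonymous page, the opcode is patched
 * in, and the pages are switched via __replace_page().
 * Called with mm->mmap_sem held.
 */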
static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	ret = verify_opcode(old_page, vaddr, &opcode);

	__SetPageUptodate(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
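/* In __find_uprobe(): walk uprobes_tree, comparing the (inode, offset) key. */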
		match = match_uprobe(&u, uprobe);
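/*
 * find_uprobe - look up a uprobe by (inode, offset) in uprobes_tree.
 * __find_uprobe() takes a reference on a match; the caller must drop it
 * with put_uprobe().
 */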
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}
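/*
 * __insert_uprobe - link @uprobe into the rbtree, sorted on
 * (inode, offset). If a matching node already exists, a reference to it
 * is returned instead.
 */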
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
		match = match_uprobe(uprobe, u);

	rb_link_node(&uprobe->rb_node, parent, p);
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}
static void put_uprobe(struct uprobe *uprobe)
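/*
 * alloc_uprobe - allocate and insert a new uprobe. If another thread
 * raced us into the tree, insert_uprobe() returns the existing node
 * (cur_uprobe) and the fresh allocation is freed.
 */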
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
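/* Run all the registered consumers' handlers for this uprobe. */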
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
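/*
 * consumer_del - remove @uc from @uprobe's consumer list; returns true
 * if @uc was found and deleted.
 */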
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
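/*
 * __copy_insn - copy @nbytes at file @offset into @insn, reading the
 * backing page through the page cache.
 */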
static int __copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
	if (!mapping->a_ops->readpage)
		return -EIO;

	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	memcpy(insn, vaddr + off, nbytes);
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;

	mapping = uprobe->inode->i_mapping;
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
					bytes - nbytes, uprobe->offset + nbytes);
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
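/*
 * prepare_uprobe: one-time setup per uprobe: copy the original
 * instruction from the file and let the arch code analyze it;
 * UPROBE_COPY_INSN marks completion.
 */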
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
	ret = copy_insn(uprobe, file);
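/*
 * install_breakpoint: set the breakpoint instruction at @vaddr in @mm.
 * MMF_HAS_UPROBES is set in advance so uprobe_pre_sstep_notifier() sees
 * it even if the task hits the breakpoint right after __replace_page().
 */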
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);
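/* remove_breakpoint: restore the original instruction at @vaddr in @mm. */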
static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
static void delete_uprobe(struct uprobe *uprobe)
{
	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
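/*
 * build_map_info - walk mapping->i_mmap and collect one (mm, vaddr)
 * pair per mm that currently maps @offset. Allocations cannot be done
 * under the i_mmap lock, hence the prev/more retry dance.
 */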
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
		info->vaddr = offset_to_vaddr(vma, offset);
		prev = free_map_info(prev);
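/*
 * register_for_each_vma - install (or remove) the breakpoint in every
 * mm that maps uprobe->inode at uprobe->offset.
 */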
static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct map_info *info;
	int err = 0;

	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info))
		return PTR_ERR(info);
		if (err && is_register)
			goto free;
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register)
			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		else
			err |= remove_breakpoint(uprobe, mm, info->vaddr);
		info = free_map_info(info);
static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}
static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);
}
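/*
 * uprobe_register - register a probe.
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: consumer whose handler runs when the probe is hit.
 *
 * A sketch of a caller (not part of this file; names are illustrative):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	ret = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 */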
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe) {
		ret = -ENOMEM;
	} else if (!consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret)
			__uprobe_unregister(uprobe);
	}
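/*
 * uprobe_unregister - drop @uc from the probe at (@inode, @offset) and
 * remove the breakpoints once the last consumer is gone.
 */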
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers)
			__uprobe_unregister(uprobe);
	}
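/*
 * find_node_in_range - find an rbtree node for @inode whose offset lies
 * in [min, max].
 */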
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
		} else if (inode > u->inode) {
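/*
 * build_probe_list - for the range [start, end) in @vma, collect all
 * matching uprobes from the rbtree onto @head, taking a reference on
 * each.
 */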
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;
	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	/* (elided: a matching rb_prev() walk collects n and the nodes left of it) */
	for (t = n; (t = rb_next(t)); ) {
		u = rb_entry(t, struct uprobe, rb_node);
		if (u->inode != inode || u->offset > max)
			break;
		list_add(&u->pending_list, head);
		atomic_inc(&u->ref);
	}
	spin_unlock(&uprobes_treelock);
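/*
 * uprobe_mmap - called from mmap_region() with mm->mmap_sem held;
 * installs breakpoints for all probes registered against the new vma.
 */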
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;
	inode = vma->vm_file->f_mapping->host;
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = vma->vm_file->f_mapping->host;
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;
	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}
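/*
 * uprobe_munmap - called in the context of an munmap() of @vma; flags
 * the mm for a lazy MMF_HAS_UPROBES recalculation.
 */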
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
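/*
 * xol_add_vma - map the per-mm XOL page into the process as a special
 * mapping (executable, not copied on fork, not dumpable).
 */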
static int xol_add_vma(struct xol_area *area)
{
	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
static struct xol_area *get_xol_area(struct mm_struct *mm)
{
	struct xol_area *area;
static struct xol_area *xol_alloc_area(void)
{
	struct xol_area *area;
	if (!xol_add_vma(area))
		return area;

	kfree(area->bitmap);
	return get_xol_area(current->mm);
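/* From uprobe_clear_state(), presumably: the XOL area is freed on mm teardown. */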
	kfree(area->bitmap);
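/*
 * xol_take_insn_slot - search the XOL bitmap for a free slot; if the
 * page is full, wait on area->wq until a slot is freed.
 */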
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
{
	struct xol_area *area;
	area = get_xol_area(current->mm);
	if (!area)
		area = xol_alloc_area();
	current->utask->xol_vaddr = xol_take_insn_slot(area);

	current->utask->vaddr = slot_addr;

	return current->utask->xol_vaddr;
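/*
 * xol_free_insn_slot - if a slot was allocated to @tsk, clear its bit
 * in the bitmap and wake up anyone waiting in xol_take_insn_slot().
 */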
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;
	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		offset = slot_addr - area->vaddr;
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);
		tsk->utask->xol_vaddr = 0;
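/* uprobe_free_utask - free @t's uprobe_task state; called when the task exits. */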
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
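/* add_utask - allocate current->utask on first use. */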
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;
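/*
 * pre_ssout - get an XOL slot, copy the probed instruction into it and
 * arrange for the task to single-step it out of line.
 */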
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
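/*
 * uprobe_deny_signal - while single-stepping, keep the thread from
 * entering signal handling; TIF_SIGPENDING is restored via
 * recalc_sigpending() in handle_singlestep().
 */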
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;
	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
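/*
 * can_skip_sstep - let the arch code emulate or skip the instruction
 * (e.g. a nop) so that no single-step is needed.
 */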
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
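/*
 * mmf_recalc_uprobes - clear MMF_HAS_UPROBES once no vma in @mm
 * contains a breakpoint any more.
 */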
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
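/*
 * is_swbp_at_addr - read the opcode at @vaddr (first via a
 * pagefault-disabled user access, falling back to get_user_pages())
 * and report whether it is the breakpoint instruction.
 */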
static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	pagefault_disable();
	copy_opcode(page, vaddr, &opcode);
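/*
 * find_active_uprobe - map @bp_vaddr back to (inode, offset) and look
 * up the uprobe; *@is_swbp reports whether a breakpoint instruction is
 * present when no uprobe is found.
 */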
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = vma->vm_file->f_mapping->host;
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}
		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}
	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);
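/*
 * handle_swbp - run the handler chain for the hit breakpoint and ask
 * the thread to single-step the original instruction out of line.
 */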
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;
	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
			/* raced with uprobe_unregister(): reset the IP and move on */
			instruction_pointer_set(regs, bp_vaddr);
		utask = add_utask();
	handler_chain(uprobe, regs);
	if (can_skip_sstep(uprobe, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		utask->active_uprobe = uprobe;
		utask->state = UTASK_SSTEP;
		return;
	}
	/* cannot single-step and cannot skip: re-execute the instruction */
	instruction_pointer_set(regs, bp_vaddr);
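/*
 * handle_singlestep - perform the arch fix-ups after the out-of-line
 * step and let pending signals take effect again.
 */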
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}
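/*
 * uprobe_post_sstep_notifier - called in interrupt context from the
 * notifier chain; sets TIF_UPROBE to signal completion of a single-step.
 */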
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}
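/* Initialize the serialization mutexes and register the arch exception notifier. */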
static int __init init_uprobes(void)
static void __exit exit_uprobes(void)