/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>

#include <linux/uprobes.h>
#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;
/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0
/* Can skip singlestep */
#define UPROBE_SKIP_SSTEP	1

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;
	struct arch_uprobe	arch;
};
/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
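/*
 * Worked example (illustrative, not in the original source): with
 * PAGE_SHIFT == 12, a vma mapped at vm_start == 0x400000 with vm_pgoff == 2
 * starts at file offset 0x2000, so file offset 0x3050 maps to
 *	vaddr = 0x400000 + 0x3050 - (2 << 12) = 0x401050,
 * and vaddr_to_offset() inverts it:
 *	(2 << 12) + (0x401050 - 0x400000) == 0x3050.
 */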
/*
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the cowed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(page);
	return err;
}
/*
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}
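/*
 * Example (illustrative, not in the original source): on x86 the breakpoint
 * is the one-byte int3 opcode 0xcc, so this default reduces to a single
 * byte compare; architectures with a different trap instruction override
 * this __weak hook.  Disabled sketch, assumes an x86 build.
 */
#if 0
	uprobe_opcode_t insn = 0xcc;	/* x86 int3 */
	BUG_ON(!is_swbp_insn(&insn));
#endif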
static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	void *kaddr = kmap_atomic(page);
	memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(kaddr);
}
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	copy_opcode(page, vaddr, &old_opcode);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}
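/*
 * Outcome summary for verify_opcode() (illustrative, not in the original
 * source):
 *
 *	new_opcode	old insn	meaning			return
 *	breakpoint	breakpoint	register: already done	0
 *	breakpoint	original	register: proceed	1
 *	original	breakpoint	unregister: proceed	1
 *	original	original	unregister: not ours	0
 */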
/*
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_swbp_at_addr and
 * write_opcode accordingly. This would never be a problem for archs that
 * have fixed length instructions.
 */

/*
 * write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	int ret;

 retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);

	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_new;

	ret = __replace_page(vma, vaddr, old_page, new_page);

 put_new:
	page_cache_release(new_page);
 put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
/*
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}
/*
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}
/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}
/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}
static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}
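/*
 * Refcount lifecycle sketch (illustrative, not in the original source):
 * __insert_uprobe() starts a new uprobe at ref == 2, access + creation.
 * The access ref pairs with the put_uprobe() at the end of
 * uprobe_register(); the creation ref is dropped via delete_uprobe() once
 * the last consumer goes away, and the final put frees the object.
 */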
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	/* For now assume that the instruction need not be single-stepped */
	__set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	}

	return uprobe;
}
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}
/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
static int
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off;
	pgoff_t idx;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = offset >> PAGE_CACHE_SHIFT;
	off = offset & ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes);
		if (err)
			return err;
		bytes = nbytes;
	}
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}
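/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * MAX_UINSN_BYTES == 16 as on x86): a probe at file offset 0x1ffd gives
 * nbytes == 4096 - 0xffd == 3, so the trailing 13 bytes are copied from
 * the second page first and the leading 3 bytes from the first page by
 * the final __copy_insn() call.
 */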
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}
static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}
static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}
static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	iput(uprobe->inode);
	put_uprobe(uprobe);
}
struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};
static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}
static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}
static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (!consumer_del(uprobe, uc))	/* WARN? */
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO : can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}
/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
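/*
 * A minimal consumer sketch (illustrative, not part of the original file).
 * The handler signature and struct uprobe_consumer come from
 * <linux/uprobes.h>; how the caller obtains the inode:offset pair is left
 * out and only assumed here.  Disabled code.
 */
#if 0
static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
	return 0;	/* keep the breakpoint installed */
}

static struct uprobe_consumer example_consumer = {
	.handler = example_handler,
};

	/* somewhere with a valid inode and offset: */
	err = uprobe_register(inode, offset, &example_consumer);
	/* later, torn down symmetrically: */
	uprobe_unregister(inode, offset, &example_consumer);
#endif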
/*
 * uprobe_apply - add or remove the breakpoints for an already registered probe.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}
/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset <  offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
	}
	spin_unlock(&uprobes_treelock);
}
/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}
static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = vma->vm_file->f_mapping->host;

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}
/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm = current->mm;
	int ret = -EALREADY;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;
	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;
 fail:
	up_write(&mm->mmap_sem);

	return ret;
}
/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	if (area)
		goto ret;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		goto free_bitmap;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

	__free_page(area->page);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	area = mm->uprobes_state.xol_area;
 ret:
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
	return area;
}
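/*
 * Ordering note (illustrative, not in the original source): the smp_wmb()
 * in xol_add_vma() publishes a fully initialized area before ->xol_area
 * becomes visible, and the smp_read_barrier_depends() above pairs with it,
 * so a task that observes the pointer also observes the initialized
 * bitmap, page and waitqueue.
 */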
/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}
void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->uprobes_state.xol_area = NULL;

	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}
/*
 *  - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
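/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * UPROBE_XOL_SLOT_BYTES == 128): UINSNS_PER_PAGE == 32, and taking
 * slot_nr == 5 yields slot_addr == area->vaddr + 5 * 128, i.e. an
 * offset of 0x280 into the XOL page.
 */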
/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long offset;
	unsigned long xol_vaddr;
	void *vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	/* Initialize the slot */
	offset = xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);
	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on supported architectures too.
	 */
	flush_dcache_page(area->page);

	return xol_vaddr;
}
/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}
/*
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
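/*
 * Example (illustrative): on x86 the int3 trap leaves regs->ip pointing
 * just past the one-byte breakpoint, so with UPROBE_SWBP_INSN_SIZE == 1
 * this returns ip - 1, the address where the probe was planted.
 */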
/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}
/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
}
/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}
/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc.  The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}
/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) {
		if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
			return true;
		clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
	}
	return false;
}
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}
static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
							sizeof(opcode));
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (result < 0)
		return result;

	copy_opcode(page, vaddr, &opcode);
	put_page(page);
 out:
	return is_swbp_insn(&opcode);
}
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = vma->vm_file->f_mapping->host;
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = uc->handler(uc, regs);

		WARN(rc & ~UPROBE_HANDLER_MASK,
			"bad rc=0x%x from %pf()\n", rc, uc->handler);
		remove &= rc;
	}

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}
/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	handler_chain(uprobe, regs);
	if (can_skip_sstep(uprobe, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* can_skip_sstep() succeeded, or restart if can't singlestep */
 out:
	put_uprobe(uprobe);
}
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}
/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}
/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}
static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};
static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	if (percpu_init_rwsem(&dup_mmap_sem))
		return -ENOMEM;

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);
static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);