/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
 *
 * For such cases, we now have a blacklist
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{NULL}    /* Terminator */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
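/*
 * Slot lifecycle, for reference: a slot starts out SLOT_CLEAN, becomes
 * SLOT_USED when handed out by __get_insn_slot(), and is marked
 * SLOT_DIRTY when freed from a context where a task might still be
 * executing inside it. Dirty slots are swept back to SLOT_CLEAN by
 * collect_garbage_slots() once check_safety() has shown that no task
 * can be preempted inside a slot.
 */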
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}
/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is preempted on the garbage slots */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		/* remove rp inst off the used list */
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_lock(unsigned long hash,
	 unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that had a probe in the module vaddr area
		 * which has already been freed. So, the instruction slot
		 * has already been released. We need a new slot for the
		 * new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot
		 * again, and set the disabled flag because it is not
		 * armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}
/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
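/*
 * For example, a caller may set .symbol_name = "do_fork" and .offset = 0
 * instead of filling in .addr directly; kprobe_addr() then resolves the
 * symbol through kallsyms and applies the offset. (The symbol name here
 * is only illustrative.) Specifying both .addr and .symbol_name is
 * rejected as ambiguous.
 */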
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}
/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
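/*
 * Typical usage from a module (illustrative sketch; the handler and
 * symbol names below are examples only):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	// 0 = let the probed instruction run
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	register_kprobe(&kp);	// plant the breakpoint
 *	...
 *	unregister_kprobe(&kp);	// remove it again
 */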
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);
int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
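/*
 * A jprobe wraps a kprobe so that a handler with the same signature as
 * the probed function sees its arguments. Illustrative sketch (the
 * probed function and handler names are examples only):
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		pr_info("jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// mandatory: hand control back
 *		return 0;		// never reached
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	register_jprobe(&my_jprobe);
 */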
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
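/*
 * Illustrative kretprobe usage (handler and symbol names are examples
 * only):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("probed function returned %lu\n",
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	// instances to pre-allocate
 *	};
 *
 *	register_kretprobe(&my_kretprobe);
 */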
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}
void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section is freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}
static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}
static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}
static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
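/*
 * Note that disable_kprobe()/enable_kprobe() toggle a single registered
 * probe; the probe stays in kprobe_table and keeps its instruction slot.
 * Since register_kprobe() accepts KPROBE_FLAG_DISABLED, a caller can,
 * for instance, register a probe disarmed and flip it on later
 * (illustrative sketch):
 *
 *	kp.flags |= KPROBE_FLAG_DISABLED;
 *	register_kprobe(&kp);
 *	...
 *	enable_kprobe(&kp);	// arm it only when actually needed
 */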
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}
static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
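/*
 * The resulting debugfs interface, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	cat /sys/kernel/debug/kprobes/list	# one line per probe
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all kprobes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm them
 */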
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);