/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
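/* 64 hash buckets; probes are bucketed via hash_ptr(addr, KPROBE_HASH_BITS) */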
/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
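/*
 * Illustrative sizing (arch-dependent; assuming typical x86 values): with
 * 4KB pages, MAX_INSN_SIZE of 16 and a one-byte kprobe_opcode_t,
 * INSNS_PER_PAGE works out to 256 slots per page.
 */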
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;			/* number of slots in use */
	int ngarbage;			/* number of dirty, reclaimable slots */
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
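/*
 * Slot lifecycle, as implemented below: SLOT_CLEAN -> SLOT_USED when
 * get_insn_slot() hands the slot out, SLOT_USED -> SLOT_DIRTY when
 * free_insn_slot() is called with 'dirty' set, and SLOT_DIRTY -> SLOT_CLEAN
 * once collect_garbage_slots() reclaims the slot.
 */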
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/*
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise! No unused slots. Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
/* Return 1 if all garbage on the page was collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no-one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address. That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled) {
		if (atomic_add_return(1, &kprobe_count) ==
		    (ARCH_INACTIVE_KPROBE_COUNT + 1))
			register_page_fault_notifier(&kprobe_page_fault_nb);

		arch_arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active.
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
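/*
 * Example usage (an illustrative sketch; "my_func" and its signature are
 * hypothetical). A jprobe handler mirrors the probed function's signature
 * and must end with jprobe_return(); the trailing return is never reached:
 *
 *	static long jmy_func(int arg)
 *	{
 *		printk("my_func entered, arg = %d\n", arg);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= JPROBE_ENTRY(jmy_func),
 *		.kp.symbol_name	= "my_func",
 *	};
 *
 *	register_jprobe(&my_jp);
 *	...
 *	unregister_jprobe(&my_jp);
 */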
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the probe
 * hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
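/*
 * Example usage (an illustrative sketch; "do_fork" is only a placeholder
 * target). The handler runs each time the probed function returns, and
 * maxactive bounds how many returns may be tracked concurrently:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk("probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 *	register_kretprobe(&my_rp);
 *	...
 *	unregister_kretprobe(&my_rp);
 */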
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}
#ifdef CONFIG_DEBUG_FS

static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do here */
}
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}
static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};
static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	/*
	 * Re-register the page fault notifier only if there are any
	 * active probes at the time of enabling kprobes globally
	 */
	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
		register_page_fault_notifier(&kprobe_page_fault_nb);

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();

	mutex_lock(&kprobe_mutex);
	/* Unconditionally unregister the page_fault notifier */
	unregister_page_fault_notifier(&kprobe_page_fault_nb);

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}
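/*
 * Userspace view (illustrative; assumes debugfs mounted at /sys/kernel/debug):
 *	cat /sys/kernel/debug/kprobes/list		lists registered probes
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	disarms all probes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	re-arms them
 */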
static struct file_operations fops_kp = {
	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);