/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
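
/*
 * Overview: a kprobe places a breakpoint on almost any kernel text
 * address and invokes user-supplied pre/post handlers when it fires; a
 * jprobe is a kprobe placed at a function entry that gives the handler
 * access to the function's arguments; a kretprobe (function-return
 * probe) fires when the probed function returns.
 */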

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
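
/*
 * With KPROBE_HASH_BITS == 6, each hash table below has 64 buckets;
 * entries are distributed across them with hash_ptr(), e.g.
 * hash_ptr(addr, KPROBE_HASH_BITS).
 */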

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
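
/*
 * For example, on architectures that keep the default above,
 *	kprobe_lookup_name("do_fork", p->addr);
 * expands to
 *	p->addr = ((kprobe_opcode_t *)(kallsyms_lookup_name("do_fork")));
 * An architecture with function descriptors can instead redefine the
 * macro to resolve the descriptor to the actual text address.
 */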

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
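
/*
 * For example, assuming a 4KB page and a 16-byte slot
 * (MAX_INSN_SIZE * sizeof(kprobe_opcode_t) == 16), a page holds
 * 4096 / 16 = 256 instruction slots.
 */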

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;			/* Number of slots currently in use */
};

static struct hlist_head kprobe_insn_pages;

/*
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}
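
/*
 * Sketch of a typical (hypothetical) caller in arch code, which copies
 * the original instruction into the slot for later single-stepping:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr,
 *	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 */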

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
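
/*
 * Sketch of a typical lookup from an arch breakpoint handler (names
 * are illustrative; the arch code supplies the faulting address):
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)instruction_pointer(regs));
 *	if (p)
 *		... dispatch to p->pre_handler ...
 *	preempt_enable();
 */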

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}
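
/*
 * Instance lifecycle: register_kretprobe() pre-allocates instances on
 * rp->free_instances; add_rp_inst() moves one onto the hash table and
 * rp->used_instances when the probed function is entered; and
 * recycle_rp_inst() returns it to the free list (or frees it while
 * unregistering) once the function returns or the task dies.
 */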

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
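
/*
 * Usage sketch (not part of this file; the probed symbol and handler
 * names are hypothetical):
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("probe hit at %p\n", kp->addr);
 *		return 0;	// 0 == run the probed instruction
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);	// arm the probe
 *	...
 *	unregister_kprobe(&my_kp);	// disarm and clean up
 */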

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) &&
	    (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier() if no probes are active.
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}
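
/*
 * Jprobe usage sketch (hypothetical handler; the do_fork signature
 * shown is illustrative of the 2.6 era and must mirror the probed
 * function exactly, and the handler must end with jprobe_return()):
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk("do_fork: flags=0x%lx\n", clone_flags);
 *		jprobe_return();	// never returns normally
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= JPROBE_ENTRY(jdo_fork),
 *		.kp.symbol_name	= "do_fork",
 *	};
 *	register_jprobe(&my_jp);
 */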

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the return address only after the
	 * last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
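
/*
 * Kretprobe usage sketch (hypothetical handler; it runs when the
 * probed function returns):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk("probed function at %p returned\n", ri->rp->kp.addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	// concurrent activations to track
 *	};
 *	register_kretprobe(&my_rp);
 */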

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);