/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* CPU currently holding kprobe_lock; NR_CPUS when the lock is unowned. */
unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
/* kprobe whose handler is currently executing (see the aggr_* handlers). */
static struct kprobe *curr_kprobe;

/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
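
/*
 * For scale, a worked example with illustrative arch values (these
 * constants live in arch headers and are assumptions here, not taken
 * from this file): with PAGE_SIZE = 4096, MAX_INSN_SIZE = 16 and a
 * one-byte kprobe_opcode_t, INSNS_PER_PAGE is 4096 / (16 * 1) = 256
 * instruction slots per page.
 */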

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;			/* Number of slots in use */
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}

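/*
 * Typical use of the two helpers above, sketched after what arch code
 * (e.g. x86_64's arch_prepare_kprobe and arch_remove_kprobe) does with
 * them; the exact arch details are assumptions, not part of this file:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr,
 *	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	...
 *	free_insn_slot(p->ainsn.insn);	 (on probe removal)
 */
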
/* Locks kprobe: irqs must be disabled */
void lock_kprobes(void)
{
	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();
}

void unlock_kprobes(void)
{
	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);
}

/* You have to be holding the kprobe_lock */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		curr_kprobe = NULL;
	}
	return 0;
}

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
}

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp = curr_kprobe;
	if (curr_kprobe && kp->break_handler) {
		if (kp->break_handler(kp, regs)) {
			curr_kprobe = NULL;
			return 1;
		}
	}
	curr_kprobe = NULL;
	return 0;
}

struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

void add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

void recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}

struct hlist_head *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from exit_thread or flush_thread when task tk's
 * stack is being recycled so that we can recycle any function-return probe
 * instances associated with this task. These left over instances represent
 * probed functions that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
				       struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = arch_prepare_kprobe(p)) != 0) {
		goto rm_kprobe;
	}
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}

void unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

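/*
 * Usage sketch (illustrative, not part of this file): a jprobe handler
 * mirrors the probed function's signature so it can inspect arguments,
 * and must finish with jprobe_return(). The probed function and its
 * 2.6-era prototype below are assumptions for the example.
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "do_fork: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	 (control never passes this point)
 *		return 0;
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry = (kprobe_opcode_t *) jdo_fork,
 *	};
 *
 * Set jp.kp.addr to the probed function's address, then register_jprobe(&jp).
 */
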
#ifdef ARCH_SUPPORTS_KRETPROBES

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

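/*
 * Usage sketch (illustrative, not part of this file): a kretprobe handler
 * runs when the probed function returns; ri->ret_addr holds the original
 * return address and ri->rp points back at the kretprobe.
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,	 (instances preallocated at registration)
 *	};
 *
 * Set my_rp.kp.addr to the function's entry address, then
 * register_kretprobe(&my_rp).
 */
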
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
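
/*
 * End-to-end usage sketch (illustrative, not part of this file): a minimal
 * module that logs hits on a probed function via the exported API above.
 * How kp.addr is obtained is left open; the handler and names below are
 * assumptions for the example.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;	 (0 lets the probed instruction execute)
 *	}
 *
 *	static struct kprobe kp = {
 *		.pre_handler = handler_pre,
 *	};
 *
 *	static int __init probe_init(void)
 *	{
 *		kp.addr = (kprobe_opcode_t *) ...address of probed function...;
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit probe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *
 *	module_init(probe_init);
 *	module_exit(probe_exit);
 *	MODULE_LICENSE("GPL");
 */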