/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
static struct kprobe *curr_kprobe;
/* Locks kprobe: irqs must be disabled */
void lock_kprobes(void)
{
	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();
}

void unlock_kprobes(void)
{
	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);
}
/* You have to be holding the kprobe_lock */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			if (kp->pre_handler(kp, regs))
				return 1;
			curr_kprobe = NULL;
		}
	}
	return 0;
}

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
	return;
}

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp = curr_kprobe;
	if (curr_kprobe && kp->break_handler) {
		if (kp->break_handler(kp, regs)) {
			curr_kprobe = NULL;
			return 1;
		}
	}
	curr_kprobe = NULL;
	return 0;
}
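/*
 * Illustration (hypothetical handlers, not part of this file): if two
 * kprobes are registered at the same address, register_kprobe() below
 * installs the aggregate handlers above in a "manager kprobe", and a
 * single hit then invokes every handler on its ->list:
 *
 *	static int h1(struct kprobe *p, struct pt_regs *regs) { return 0; }
 *	static int h2(struct kprobe *p, struct pt_regs *regs) { return 0; }
 *	static struct kprobe kp1 = { .pre_handler = h1 };
 *	static struct kprobe kp2 = { .pre_handler = h2 };
 *
 *	kp1.addr = kp2.addr = (kprobe_opcode_t *) some_kernel_function;
 *	register_kprobe(&kp1);
 *	register_kprobe(&kp2);
 *
 * After the second call, a hit runs aggr_pre_handler(), which invokes
 * h1 and h2 in turn.
 */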
struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler,
	.post_handler = trampoline_post_handler
};
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}
static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}
struct kretprobe_instance *get_rp_inst(void *sara)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct task_struct *tsk;
	struct kretprobe_instance *ri;

	tsk = arch_get_kprobe_task(sara);
	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
	hlist_for_each_entry(ri, node, head, hlist) {
		if (ri->stack_addr == sara)
			return ri;
	}
	return NULL;
}
void add_rp_inst(struct kretprobe_instance *ri)
{
	struct task_struct *tsk;
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);
	tsk = arch_get_kprobe_task(ri->stack_addr);
	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}
void recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);

	/* remove rp inst off the used list */
	hlist_del(&ri->uflist);

	/* put rp inst back onto the free list */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->free_instances);
}
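/*
 * Lifecycle sketch (a summary of the helpers above, phrased as a trace):
 *
 *	register_kretprobe()      instance sits on rp->free_instances
 *	probed function entry     add_rp_inst(): instance moves to
 *				  rp->used_instances and is hashed by task
 *				  into kretprobe_inst_table
 *	probed function return    recycle_rp_inst(): instance goes back
 *				  onto rp->free_instances
 */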
struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}
struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
{
	struct task_struct *tsk;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];

	hlist_for_each_entry(ri, node, head, hlist) {
		tsk = arch_get_kprobe_task(ri->stack_addr);
		if (tsk == tk)
			return ri;
	}
	return NULL;
}
/*
 * This function is called from do_exit or do_execv when task tk's stack is
 * about to be recycled. Recycle any function-return probe instances
 * associated with this task. These represent probed functions that have
 * been called but may never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	unsigned long flags = 0;
	spin_lock_irqsave(&kprobe_lock, flags);
	arch_kprobe_flush_task(tk);
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
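/*
 * Call-site sketch (an assumption for illustration; the actual hook sits
 * in the exit/exec paths outside this file):
 *
 *	void do_exit(long code)
 *	{
 *		...
 *		kprobe_flush_task(current);
 *		...
 *	}
 */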
/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}
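/*
 * Illustration (hypothetical probes): since jprobes are the only users of
 * break_handler, the check above makes a second jprobe on one address fail:
 *
 *	register_jprobe(&jp1);	first jprobe on the address: returns 0
 *	register_jprobe(&jp2);	same jp2.kp.addr: returns -EEXIST
 */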
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}
int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = arch_prepare_kprobe(p)) != 0) {
		goto rm_kprobe;
	}
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	p->nmissed = 0;
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}
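/*
 * Usage sketch for register_kprobe() (illustrative only, not part of this
 * file): a module plants a probe and logs each hit.  The probed symbol,
 * handler names and message below are assumptions for illustration.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.pre_handler = example_pre,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		example_kp.addr = (kprobe_opcode_t *) some_kernel_function;
 *		return register_kprobe(&example_kp);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_kprobe(&example_kp);
 *	}
 *	module_init(example_init);
 *	module_exit(example_exit);
 */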
void unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
int register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
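/*
 * Usage sketch for register_jprobe() (illustrative only): the jprobe
 * handler must mirror the probed function's signature so it can read the
 * arguments, and must always end in jprobe_return().  The probed function
 * and its signature here are assumptions.
 *
 *	static int example_jhandler(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk(KERN_INFO "args: %lx %lx\n", arg0, arg1);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe example_jp = {
 *		.entry = (kprobe_opcode_t *) example_jhandler,
 *	};
 *
 *	example_jp.kp.addr = (kprobe_opcode_t *) some_kernel_function;
 *	register_jprobe(&example_jp);
 */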
#ifdef ARCH_SUPPORTS_KRETPROBES

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}
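/*
 * Usage sketch for register_kretprobe() (illustrative only): rp->handler
 * runs, via the trampoline probe above, each time the probed function
 * returns.  The probed symbol and handler body are assumptions.
 *
 *	static int example_ret_handler(struct kretprobe_instance *ri,
 *				       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.handler = example_ret_handler,
 *		.maxactive = 20,
 *	};
 *
 *	example_rp.kp.addr = (kprobe_opcode_t *) some_kernel_function;
 *	register_kretprobe(&example_rp);
 */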
#else /* ARCH_SUPPORTS_KRETPROBES */

int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */
void unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = register_die_notifier(&kprobe_exceptions_nb);
	/* Register the trampoline probe for return probe */
	register_kprobe(&trampoline_p);
	return err;
}

__initcall(init_kprobes);
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);