/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct  Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *           Probes initial implementation (includes suggestions from
 *           Rusty Russell).
 * 2004-Aug  Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *           hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *           interface to access function arguments.
 * 2004-Sep  Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *           exceptions notifier to be first on the priority list.
 * 2005-May  Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *           <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *           <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
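/*
 * Probes are hashed by address into KPROBE_TABLE_SIZE (2^6 = 64) buckets;
 * see get_kprobe() below, which indexes kprobe_table with
 * hash_ptr(addr, KPROBE_HASH_BITS).
 */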
/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);             /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);        /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
struct kprobe_blackpoint kprobe_blacklist[] = {
        {"preempt_schedule",},
        {NULL}    /* Terminator */
};
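/*
 * The blacklist entries above carry only names; init_kprobes() resolves
 * each name to a start address and length via kallsyms, and
 * in_kprobes_functions() then rejects any registration that falls inside
 * one of those ranges.
 */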
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE  (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
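/*
 * For example, assuming an architecture where MAX_INSN_SIZE *
 * sizeof(kprobe_opcode_t) is 16 bytes and PAGE_SIZE is 4096, each
 * executable page holds 256 instruction slots.
 */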
struct kprobe_insn_page {
        struct hlist_node hlist;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};
enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};
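/*
 * Slot lifecycle: get_insn_slot() turns a CLEAN slot into USED;
 * free_insn_slot() either recycles a USED slot back to CLEAN immediately,
 * or marks it DIRTY when the copied instruction may still be executing.
 * DIRTY slots go back to CLEAN in collect_garbage_slots(), once
 * check_safety() says no task can still be single-stepping in them.
 */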
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk("Check failed: %s is running\n", p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}
/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

retry:
        hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
                goto retry;
        }
        /* All out of space.  Need to allocate a new page.  Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip)
                return NULL;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_HLIST_NODE(&kip->hlist);
        hlist_add_head(&kip->hlist, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}
/* Return 1 if the slot's page became empty (and was freed or recycled),
 * otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                hlist_del(&kip->hlist);
                if (hlist_empty(&kprobe_insn_pages)) {
                        INIT_HLIST_NODE(&kip->hlist);
                        hlist_add_head(&kip->hlist,
                                       &kprobe_insn_pages);
                } else {
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}
static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos, *next;

        /* Ensure no-one is preempted on the garbage slots */
        if (check_safety() != 0)
                return -EAGAIN;

        hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

        hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else {
                                collect_one_slot(kip, i);
                        }
                        break;
                }
        }

        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                collect_garbage_slots();
}
#endif
/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}
/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;
        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
        return;
}
/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                               struct hlist_head *head)
{
        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        if (ri->rp) {
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(tk);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}
static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
        unsigned long flags;
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;
        /* No race here */
        spin_lock_irqsave(&kretprobe_lock, flags);
        hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &old_p->list);
        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_handler;
        return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        if (p->post_handler)
                ap->post_handler = aggr_post_handler;
        if (p->break_handler)
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_handler) {
                copy_kprobe(old_p, p);
                ret = add_new_kprobe(old_p, p);
        } else {
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
                copy_kprobe(ap, p);
                ret = add_new_kprobe(ap, p);
        }
        return ret;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
        struct kprobe_blackpoint *kb;

        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        /*
         * If there exists a kprobe_blacklist, verify and
         * fail any probe registration in the prohibited area
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                if (kb->start_addr) {
                        if (addr >= kb->start_addr &&
                            addr < (kb->start_addr + kb->range))
                                return -EINVAL;
                }
        }
        return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
        kprobe_opcode_t *addr = p->addr;
        if (p->symbol_name) {
                if (addr)
                        return NULL;
                kprobe_lookup_name(p->symbol_name, addr);
        }

        if (!addr)
                return NULL;
        return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
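/*
 * For example, a probe declared with .symbol_name = "do_fork" and
 * .offset = 0x10 resolves to do_fork+0x10.  Setting both .addr and
 * .symbol_name is ambiguous, so kprobe_addr() rejects it by returning
 * NULL.
 */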
static int __kprobes __register_kprobe(struct kprobe *p,
        unsigned long called_from)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;
        kprobe_opcode_t *addr;

        addr = kprobe_addr(p);
        if (!addr)
                return -EINVAL;
        p->addr = addr;

        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;

        p->mod_refcounted = 0;

        /*
         * Check if we are probing a module.
         */
        probed_mod = module_text_address((unsigned long) p->addr);
        if (probed_mod) {
                struct module *calling_mod = module_text_address(called_from);
                /*
                 * We must allow modules to probe themselves and in this case
                 * avoid incrementing the module refcount, so as to allow
                 * unloading of self probing modules.
                 */
                if (calling_mod && calling_mod != probed_mod) {
                        if (unlikely(!try_module_get(probed_mod)))
                                return -EINVAL;
                        p->mod_refcounted = 1;
                } else
                        probed_mod = NULL;
        }

        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        ret = arch_prepare_kprobe(p);
        if (ret)
                goto out;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (kprobe_enabled)
                arch_arm_kprobe(p);

out:
        mutex_unlock(&kprobe_mutex);

        if (ret && probed_mod)
                module_put(probed_mod);
        return ret;
}
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p))
                return -EINVAL;

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                        /* kprobe p is a valid probe */
                                goto valid_p;
                return -EINVAL;
        }
valid_p:
        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
             list_is_singular(&old_p->list))) {
                /*
                 * This is the only probe on the hash list. Disarm only if
                 * kprobes are enabled - otherwise, the breakpoint would
                 * already have been removed. We save on flushing icache.
                 */
                if (kprobe_enabled)
                        arch_disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
        } else {
                if (p->break_handler)
                        old_p->break_handler = NULL;
                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
                        old_p->post_handler = NULL;
                }
noclean:
                list_del_rcu(&p->list);
        }
        return 0;
}
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
        struct module *mod;
        struct kprobe *old_p;

        if (p->mod_refcounted) {
                mod = module_text_address((unsigned long)p->addr);
                if (mod)
                        module_put(mod);
        }

        if (list_empty(&p->list) || list_is_singular(&p->list)) {
                if (!list_empty(&p->list)) {
                        /* "p" is the last child of an aggr_kprobe */
                        old_p = list_entry(p->list.next, struct kprobe, list);
                        list_del(&p->list);
                        kfree(old_p);
                }
                arch_remove_kprobe(p);
        }
}
static int __register_kprobes(struct kprobe **kps, int num,
        unsigned long called_from)
{
        int i, ret = 0;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = __register_kprobe(kps[i], called_from);
                if (ret < 0 && i > 0) {
                        unregister_kprobes(kps, i);
                        break;
                }
        }
        return ret;
}
/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
        return __register_kprobes(&p, 1,
                                  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
        unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
        return __register_kprobes(kps, num,
                                  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(kps[i]) < 0)
                        kps[i]->addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++)
                if (kps[i]->addr)
                        __unregister_kprobe_bottom(kps[i]);
}
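/*
 * A minimal usage sketch (illustrative only: the handler name and the
 * probed symbol below are hypothetical, not defined in this file):
 *
 *      // Runs just before the probed instruction executes.
 *      static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              return 0;
 *      }
 *
 *      static struct kprobe kp = {
 *              .symbol_name = "do_fork",
 *              .pre_handler = handler_pre,
 *      };
 *
 *      ret = register_kprobe(&kp);
 *      ...
 *      unregister_kprobe(&kp);
 */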
static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
        return (unsigned long)entry;
}
static int __register_jprobes(struct jprobe **jps, int num,
        unsigned long called_from)
{
        struct jprobe *jp;
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                unsigned long addr;
                jp = jps[i];
                addr = arch_deref_entry_point(jp->entry);

                if (!kernel_text_address(addr))
                        ret = -EINVAL;
                else {
                        /* Todo: Verify probepoint is a function entry point */
                        jp->kp.pre_handler = setjmp_pre_handler;
                        jp->kp.break_handler = longjmp_break_handler;
                        ret = __register_kprobe(&jp->kp, called_from);
                }
                if (ret < 0 && i > 0) {
                        unregister_jprobes(jps, i);
                        break;
                }
        }
        return ret;
}
int __kprobes register_jprobe(struct jprobe *jp)
{
        return __register_jprobes(&jp, 1,
                                  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_jprobes(&jp, 1);
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
        return __register_jprobes(jps, num,
                                  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
                        jps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (jps[i]->kp.addr)
                        __unregister_kprobe_bottom(&jps[i]->kp);
        }
}
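/*
 * A minimal jprobe sketch (illustrative only; jdo_fork is a hypothetical
 * handler, and the do_fork prototype shown is assumed, not taken from this
 * file). The handler must mirror the probed function's signature and must
 * end with a call to jprobe_return():
 *
 *      static long jdo_fork(unsigned long clone_flags,
 *                           unsigned long stack_start, struct pt_regs *regs,
 *                           unsigned long stack_size,
 *                           int __user *parent_tidptr,
 *                           int __user *child_tidptr)
 *      {
 *              // Arguments arrive exactly as the probed function sees them.
 *              jprobe_return();
 *              return 0;       // never reached
 *      }
 *
 *      static struct jprobe my_jprobe = {
 *              .entry            = jdo_fork,
 *              .kp.symbol_name   = "do_fork",
 *      };
 *
 *      ret = register_jprobe(&my_jprobe);
 */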
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long flags = 0;

        /* TODO: consider swapping the RA only after the last pre_handler fired */
        spin_lock_irqsave(&kretprobe_lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                struct kretprobe_instance *ri;

                ri = hlist_entry(rp->free_instances.first,
                                 struct kretprobe_instance, uflist);
                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
                        spin_unlock_irqrestore(&kretprobe_lock, flags);
                        return 0;
                }

                arch_prepare_kretprobe(ri, regs);

                /* XXX(hch): why is there no hlist_move_head? */
                hlist_del(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->used_instances);
                hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
        } else
                rp->nmissed++;
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        return 0;
}
static int __kprobes __register_kretprobe(struct kretprobe *rp,
                                          unsigned long called_from)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        void *addr;

        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (!addr)
                        return -EINVAL;

                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
                                return -EINVAL;
                }
        }

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
                               rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        ret = __register_kprobe(&rp->kp, called_from);
        if (ret != 0)
                free_rp_inst(rp);
        return ret;
}
static int __register_kretprobes(struct kretprobe **rps, int num,
        unsigned long called_from)
{
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = __register_kretprobe(rps[i], called_from);
                if (ret < 0 && i > 0) {
                        unregister_kretprobes(rps, i);
                        break;
                }
        }
        return ret;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return __register_kretprobes(&rp, 1,
                                     (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unregister_kretprobes(&rp, 1);
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        return __register_kretprobes(rps, num,
                                     (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
                        rps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (rps[i]->kp.addr) {
                        __unregister_kprobe_bottom(&rps[i]->kp);
                        cleanup_rp_inst(rps[i]);
                }
        }
}
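/*
 * A minimal kretprobe sketch (illustrative only; ret_handler is a
 * hypothetical handler). The handler runs when the probed function
 * returns; maxactive bounds how many returns may be tracked at once,
 * and rp->nmissed counts returns dropped when instances run out:
 *
 *      static int ret_handler(struct kretprobe_instance *ri,
 *                             struct pt_regs *regs)
 *      {
 *              // Inspect regs here for the return value.
 *              return 0;
 *      }
 *
 *      static struct kretprobe my_kretprobe = {
 *              .handler          = ret_handler,
 *              .maxactive        = 20,
 *              .kp.symbol_name   = "do_fork",
 *      };
 *
 *      ret = register_kretprobe(&my_kretprobe);
 */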
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}

#endif /* CONFIG_KRETPROBES */
static int __init init_kprobes(void)
{
        int i, err = 0;
        unsigned long offset = 0, size = 0;
        char *modname, namebuf[128];
        const char *symbol_name;
        void *addr;
        struct kprobe_blackpoint *kb;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }

        /*
         * Lookup and populate the kprobe_blacklist.
         *
         * Unlike the kretprobe blacklist, we'll need to determine
         * the range of addresses that belong to the said functions,
         * since a kprobe need not necessarily be at the beginning
         * of a function.
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                kprobe_lookup_name(kb->name, addr);
                if (!addr)
                        continue;

                kb->start_addr = (unsigned long)addr;
                symbol_name = kallsyms_lookup(kb->start_addr,
                                &size, &offset, &modname, namebuf);
                if (!symbol_name)
                        kb->range = 0;
                else
                        kb->range = size;
        }

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
                                printk("kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }

        /* By default, kprobes are enabled */
        kprobe_enabled = true;

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);

        if (!err)
                init_test_probes();
        return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname)
{
        char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";
        if (sym)
                seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
                        sym, offset, (modname ? modname : " "));
        else
                seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}
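/*
 * Each line of the debugfs "list" file printed above has the form
 * "address type symbol+offset [module]", where type is "r" for a
 * kretprobe, "j" for a jprobe and "k" for a plain kprobe; e.g.
 * (address illustrative):
 *
 *      c0123456 k do_fork+0x0
 */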
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                        &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}
static struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
        .open           = kprobes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
static void __kprobes enable_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already enabled, just return */
        if (kprobe_enabled)
                goto already_enabled;

        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        arch_arm_kprobe(p);
        }

        kprobe_enabled = true;
        printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
        mutex_unlock(&kprobe_mutex);
        return;
}
static void __kprobes disable_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already disabled, just return */
        if (!kprobe_enabled)
                goto already_disabled;

        kprobe_enabled = false;
        printk(KERN_INFO "Kprobes globally disabled\n");
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
                        if (!arch_trampoline_kprobe(p))
                                arch_disarm_kprobe(p);
                }
        }

        mutex_unlock(&kprobe_mutex);
        /* Allow all currently running kprobes to complete */
        synchronize_sched();
        return;

already_disabled:
        mutex_unlock(&kprobe_mutex);
        return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
               char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[3];

        if (kprobe_enabled)
                buf[0] = '1';
        else
                buf[0] = '0';
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
               const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        int buf_size;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        switch (buf[0]) {
        case 'y':
        case 'Y':
        case '1':
                enable_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
                disable_all_kprobes();
                break;
        }

        return count;
}
static struct file_operations fops_kp = {
        .read =         read_enabled_file_bool,
        .write =        write_enabled_file_bool,
};
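/*
 * With debugfs mounted at /sys/kernel/debug, all registered probes can
 * be disarmed and re-armed from user space via the "enabled" file
 * created below:
 *
 *      echo 0 > /sys/kernel/debug/kprobes/enabled
 *      echo 1 > /sys/kernel/debug/kprobes/enabled
 *      cat /sys/kernel/debug/kprobes/enabled
 */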
static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;
        unsigned int value = 1;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        file = debugfs_create_file("enabled", 0600, dir,
                                        &value, &fops_kp);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}
late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_kprobes);
EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(register_jprobes);
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif

#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
EXPORT_SYMBOL_GPL(register_kretprobes);
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#endif