/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
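
/*
 * Both tables hash into KPROBE_TABLE_SIZE (64) hlist buckets.  As the
 * lookup helpers further down show, kprobe_table is indexed by the
 * probed address and kretprobe_inst_table by the probed task:
 *
 *	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 *	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
 */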

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
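
/*
 * Worked example (the values are arch-specific assumptions, not taken
 * from this file): with PAGE_SIZE == 4096, a one-byte kprobe_opcode_t
 * and MAX_INSN_SIZE == 16, INSNS_PER_PAGE == 4096 / (16 * 1) == 256
 * instruction slots per executable page.
 */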

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
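
/*
 * Slot lifecycle note: a slot handed out above is returned through
 * free_insn_slot() below.  A "dirty" slot (one another CPU may still be
 * single-stepping through) is only marked SLOT_DIRTY and reclaimed
 * later by collect_garbage_slots(), once check_safety() says no task
 * can be preempted inside it.
 */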

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
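
/*
 * Why the two calling contexts above are enough: updaters modify the
 * hash chains with the _rcu list ops while holding kprobe_mutex, and
 * unregistration waits in synchronize_sched() before freeing a probe.
 * Readers here run either under that mutex or with preemption disabled,
 * and preemption-disabled regions are exactly what synchronize_sched()
 * waits out, so a reader can never see a freed kprobe.
 */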

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These leftover instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
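
/*
 * Background note: functions tagged __kprobes (including everything in
 * this file) are linked into the .kprobes.text section delimited by
 * __kprobes_text_start/__kprobes_text_end, which is exactly the range
 * rejected above.  That keeps kprobes from probing their own machinery
 * and recursing on the breakpoint.
 */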

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and then add
	 * the offset to the address.  That way the addr field can
	 * either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
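
/*
 * Illustrative usage sketch (caller-side code, not part of this file;
 * "my_probe"/"handler_pre" are made-up names and "do_fork" is just an
 * example symbol):
 *
 *	static int handler_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "hit probe at %p\n", kp->addr);
 *		return 0;	// 0: go on and execute the probed insn
 *	}
 *
 *	static struct kprobe my_probe = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	ret = register_kprobe(&my_probe);	// 0 on success
 *	...
 *	unregister_kprobe(&my_probe);
 */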

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	unsigned long addr = arch_deref_entry_point(jp->entry);

	if (!kernel_text_address(addr))
		return -EINVAL;

	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
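
/*
 * Illustrative jprobe usage (a caller-side sketch; the handler name and
 * probed symbol are made up, and the exact do_fork signature depends on
 * the kernel version).  The handler mirrors the probed function's
 * signature so it can read the arguments, and must end by calling
 * jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags, ...)
 *	{
 *		printk(KERN_INFO "clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// hands control back to the probed fn
 *		return 0;		// never reached
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_jprobe(&my_jprobe);
 */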

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr = rp->kp.addr;

	if (kretprobe_blacklist_size) {
		if (addr == NULL)
			kprobe_lookup_name(rp->kp.symbol_name, addr);
		addr += rp->kp.offset;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
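
/*
 * Illustrative kretprobe usage (a caller-side sketch; the names are
 * made up).  The return handler runs when the probed function returns;
 * maxactive bounds how many in-flight calls can be tracked at once,
 * and rp.nmissed counts returns dropped when the instances ran out:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		// inspect the return value via regs (arch-specific)
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */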

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}
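
/*
 * Usage note: with debugfs mounted in the usual place (an assumption
 * about the running system, not something this file controls), the two
 * handlers above back a file that can be poked from userspace:
 *
 *	cat /sys/kernel/debug/kprobes/enabled
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all probes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm them
 */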

static struct file_operations fops_kp = {
	.read	= read_enabled_file_bool,
	.write	= write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif

#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
#endif