/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
 *
 * For such cases, we now have a blacklist
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))
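
/*
 * Sizing note (added for clarity): kprobe_insn_page ends in the flexible
 * slot_used[] array, so KPROBE_INSN_PAGE_SIZE(slots) is the fixed header
 * plus one state byte per slot.  E.g. with slots_per_page() == 16 the
 * kmalloc() below asks for offsetof(struct kprobe_insn_page, slot_used)
 * + 16 bytes.
 */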
struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};
static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect it and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}
/* Return 1 if all garbages are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbages */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}
static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
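
/*
 * Note on slot states (summary of the code above): a slot moves
 * SLOT_CLEAN -> SLOT_USED in __get_insn_slot(), and SLOT_USED ->
 * SLOT_DIRTY (or straight back to SLOT_CLEAN when freed non-dirty) in
 * __free_insn_slot().  Dirty slots are swept back to SLOT_CLEAN by
 * collect_garbage_slots() once a cache accumulates more garbage than
 * slots_per_page().
 */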
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
};

/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}
void __kprobes free_optinsn_slot(kprobe_opcode_t *slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}
/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}
#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;
/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from arch-dep optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}
/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}
/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}
/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int __kprobes kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}
/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static __kprobes void do_optimize_kprobes(void)
{
	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * The optimization/unoptimization refers online_cpus via
	 * stop_machine() and cpu-hotplug modifies online_cpus.
	 * And same time, text_mutex will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug try to lock
	 * text_mutex but stop_machine can not be done because online_cpus
	 * has been changed)
	 * To avoid this deadlock, we need to call get_online_cpus()
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on unoptimizing_list.
 */
static __kprobes void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop free_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
/* Reclaim all kprobes on the free_list */
static __kprobes void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}
/* Start optimizer after OPTIMIZE_DELAY passed */
static __kprobes void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}
/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for quiesence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for quiesence period to ensure all running interrupts
	 * are done. Because optprobe may modify multiple instructions
	 * there is a chance that Nth instruction is interrupted. In that
	 * case, running interrupt can return to 2nd-Nth byte of jump
	 * instruction. This wait is for avoiding it.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after quiesence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after quiesence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}
/* Wait for completing optimization and unoptimization */
static __kprobes void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}
/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Both of break_handler and post_handler are not supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}
/* Short cut to direct unoptimizing */
static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}
/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}
/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}
/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}
/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}
/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
}
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}
#ifdef CONFIG_SYSCTL
static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
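
/*
 * Usage note: with CONFIG_SYSCTL the handler above is wired up (from
 * kernel/sysctl.c) behind the debug.kprobes-optimization knob, so e.g.
 * "sysctl -w debug.kprobes-optimization=0" unoptimizes all probes and
 * "=1" re-optimizes them, as described in Documentation/kprobes.txt.
 */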
/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}
/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	unoptimize_kprobe(p, false);	/* Try to unoptimize */

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimized this probe */
}
#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)
/* Without optimization, there should be no unused kprobes to be reused */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int kprobe_ftrace_enabled;
/* Must ensure p->addr is really on ftrace */
static int __kprobes prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}
/* Caller must lock kprobe_mutex */
static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}
/* Caller must lock kprobe_mutex */
static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}
/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_unlock(unsigned long hash,
       unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
* Add the new probe to ap->list. Fail if this is the
* second jprobe at the address - two jprobes can't coexist
*/
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	/*
	 * Get online CPUs to avoid text_mutex deadlock with stop machine,
	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which has already
		 * been freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free aggr_probe. It will be used next time, or
			 * freed by unregister_kprobe.
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return ret;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up symbol or invalid
 * combination of parameters.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}
/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}
static __kprobes int check_kprobe_address_safe(struct kprobe *p,
					       struct module **probed_mod)
{
	int ret = 0;
	unsigned long ftrace_addr;

	/*
	 * If the address is located on a ftrace nop, set the
	 * breakpoint to the following instruction.
	 */
	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}

	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);	/* Avoiding text modification */
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
/* Check if all probes on the aggrprobe are disabled */
static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}
/* Disable one kprobe: must be called with kprobe_mutex held */
static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (p == ap)
		/*
		 * This probe is an independent(and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed can happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);
int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
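
/*
 * Example (illustrative sketch, names are assumptions): a return probe
 * that reports the return value of a probed function.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	// instances preallocated for concurrency
 *	};
 *	// register_kretprobe(&my_rp) / unregister_kretprobe(&my_rp)
 */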
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}
/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
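
/*
 * Usage note: disable_kprobe()/enable_kprobe() let a registered probe be
 * turned off and back on cheaply without the full unregister/register
 * cycle; a probe can also start out disabled by setting
 * KPROBE_FLAG_DISABLED in kp.flags before calling register_kprobe().
 */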
void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING was notified, both of module .text and
	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
	 * notified, only .init.text section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}
= {
2078 .notifier_call
= kprobes_module_callback
,
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}
static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};
static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}
static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}
static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
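
/*
 * Usage note: with CONFIG_DEBUG_FS the files created above live under
 * <debugfs>/kprobes/ (usually /sys/kernel/debug/kprobes/): "list" shows
 * every registered probe and its state flags, and writing 0/1 to
 * "enabled" calls disarm_all_kprobes()/arm_all_kprobes() respectively.
 */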
module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);