/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of an instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}
static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
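
/*
 * Illustrative sketch (an assumption about typical arch-side usage, not code
 * from this file): an arch_prepare_kprobe() implementation pairs
 * get_insn_slot() with free_insn_slot() roughly as follows, copying the
 * probed instruction into the executable slot for later single-stepping.
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void __kprobes arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn) {
 *			free_insn_slot(p->ainsn.insn, 0);
 *			p->ainsn.insn = NULL;
 *		}
 *	}
 */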
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all of the pre_handlers on the list, but ignore their return values.
 * This must be called from an arch-dependent optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	struct optimized_kprobe *op, *tmp;

	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);
	mutex_lock(&kprobe_mutex);
	if (kprobes_all_disarmed || !kprobes_allow_optimization)
		goto end;

	/*
	 * Wait for the quiescence period to ensure all running interrupts
	 * are done. Because an optprobe may modify multiple instructions,
	 * there is a chance that the Nth instruction is interrupted. In that
	 * case, the running interrupt can return into the 2nd-Nth byte of the
	 * jump instruction. This wait is for avoiding it.
	 */
	synchronize_sched();

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine() and cpu-hotplug modifies online_cpus.
	 * At the same time, text_mutex will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex but stop_machine can not be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we need to call get_online_cpus()
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		if (arch_optimize_kprobe(op) < 0)
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
end:
	mutex_unlock(&kprobe_mutex);
	mutex_unlock(&module_mutex);
}
/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported here. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;

	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
	list_add(&op->list, &optimizing_list);
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			/* Dequeue from the optimization queue */
			list_del_init(&op->list);
		else
			/* Replace jump with break */
			arch_unoptimize_kprobe(op);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
}
/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
	/* Don't unoptimize, because the target code will be freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	kfree(op);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: p must be a normal registered kprobe.
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If setting up the optimized buffer failed, fall back to a plain kprobe */
		free_aggr_kprobe(ap);
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);
}
#ifdef CONFIG_SYSCTL
/* This should be called with kprobe_mutex locked */
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = true;
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	mutex_unlock(&text_mutex);
	printk(KERN_INFO "Kprobes globally optimized\n");
}

/* This should be called with kprobe_mutex locked */
static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
	printk(KERN_INFO "Kprobes globally unoptimized\n");
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
}
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
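
/*
 * Note: when CONFIG_SYSCTL is set, the handler above is wired up to the
 * debug.kprobes-optimization sysctl (see kernel/sysctl.c), so for example
 * "sysctl -w debug.kprobes-optimization=0" unoptimizes all probes and
 * "sysctl -w debug.kprobes-optimization=1" re-optimizes them.
 */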
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	/* Check collision with other optimized kprobes */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

static void __kprobes __disarm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	unoptimize_kprobe(p);	/* Try to unoptimize */
	arch_disarm_kprobe(p);

	/* If another kprobe was blocked, optimize it. */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		optimize_kprobe(old_p);
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p)			arch_disarm_kprobe(p)

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
 * This function is called from finish_task_switch() when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	INIT_HLIST_HEAD(&empty_rp);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap);	/* Fall back to a normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (!kprobe_aggrprobe(old_p)) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(old_p);
		if (!ap)
			return -ENOMEM;
		init_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location that
		 * had a probe in the module vaddr area which has already been
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if allocating a new slot fails, we don't need
			 * to free the aggr_probe. It will be used next time,
			 * or freed by unregister_kprobe().
			 */
			return ret;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 if succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);

	get_online_cpus();	/* For avoiding text_mutex deadlock. */
	mutex_lock(&text_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		__arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
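
/*
 * Illustrative sketch (not part of this file): a minimal module using the
 * interface above. The probed symbol "do_fork" and the handler body are
 * only examples; any kernel text symbol outside the blacklist would do.
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= example_pre,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_kprobe(&example_kp);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_kprobe(&example_kp);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 */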
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (kprobe_aggrprobe(old_p) &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(old_p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed) {
				if (kprobe_disabled(old_p))
					disarm_kprobe(old_p);
				else
					/* Try to optimize this probe again */
					optimize_kprobe(old_p);
			}
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		free_aggr_kprobe(old_p);
	}
}
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
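
/*
 * Illustrative sketch (not part of this file): a jprobe handler mirrors the
 * probed function's signature and must end with jprobe_return(). The symbol
 * "do_fork" and its prototype below are assumptions used only as an example.
 *
 *	static long example_jdo_fork(unsigned long clone_flags,
 *				     unsigned long stack_start,
 *				     struct pt_regs *regs,
 *				     unsigned long stack_size,
 *				     int __user *parent_tidptr,
 *				     int __user *child_tidptr)
 *	{
 *		pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe example_jp = {
 *		.entry		= example_jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 * Registration and removal then go through register_jprobe(&example_jp) and
 * unregister_jprobe(&example_jp).
 */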
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the probe
 * hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
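
/*
 * Illustrative sketch (not part of this file): a kretprobe handler runs when
 * the probed function returns. The symbol name and handler are examples only.
 *
 *	static int example_ret_handler(struct kretprobe_instance *ri,
 *				       struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
 *			(long)regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.handler	= example_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,
 *	};
 *
 * The probe is installed with register_kretprobe(&example_rp) and removed
 * with unregister_kretprobe(&example_rp).
 */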
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				__arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	/*
	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
	 * because disarming may also unoptimize kprobes.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				__disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
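
/*
 * Note: the debugfs interface above is typically exercised from user space as
 * "cat /sys/kernel/debug/kprobes/list" to dump the registered probes and
 * "echo 0 > /sys/kernel/debug/kprobes/enabled" to disarm them all (assuming
 * debugfs is mounted at /sys/kernel/debug).
 */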
module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);