/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/uaccess.h>
void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
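
/*
 * Note: the displacement of a 5-byte "jmp rel32" is taken relative to the
 * address of the *next* instruction, which is why the computation above
 * subtracts (from + 5) rather than from itself.
 */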
/*
 * returns non-zero if opcode can be boosted.
 */
static __always_inline int can_boost(kprobe_opcode_t opcode)
{
	switch (opcode & 0xf0) {
	case 0x70:
		return 0;	/* can't boost conditional jump */
	case 0x90:
		/* can't boost call and pushf */
		return opcode != 0x9a && opcode != 0x9c;
	case 0xc0:
		/* can't boost undefined opcodes and soft interrupts */
		return (0xc1 < opcode && opcode < 0xc6) ||
			(0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and (maybe) jmps */
		return (0xe3 < opcode && opcode != 0xe8);
	case 0xf0:
		/* clear and set flags can be boosted */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* currently, can't boost two-byte opcodes */
		return opcode != 0x0f;
	}
}
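
/*
 * "Boosting" means executing the copied instruction in its slot and then
 * jumping straight back to the original instruction stream via a relative
 * jmp written by set_jmp_op(), avoiding the second (debug) exception that
 * the single-step path would otherwise take on every probe hit.
 */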
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->opcode)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	return 0;
}
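
/*
 * ainsn.boostable has three states: -1 means the instruction can never be
 * boosted, 0 means it is a candidate not yet fixed up by resume_execution(),
 * and 1 means it is safe to execute directly from the copied-instruction
 * slot.
 */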
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	mutex_unlock(&kprobe_mutex);
}
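
/*
 * For reference (not part of this file): registering a kprobe through the
 * generic layer is what invokes arch_prepare_kprobe() and arch_arm_kprobe()
 * above.  A minimal, illustrative client -- the handler name and target
 * symbol below are made up:
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("probe hit at %p, eip = %lx\n", kp->addr, regs->eip);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre_handler,
 *		.addr = (kprobe_opcode_t *)some_kernel_function,
 *	};
 *
 * followed by register_kprobe(&my_kp) in module init and
 * unregister_kprobe(&my_kp) in module exit.
 */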
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)p->ainsn.insn;
}
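
/*
 * Setting TF arranges a debug (trap1) exception after exactly one
 * instruction; clearing IF keeps interrupts from being delivered while we
 * are executing out of the copied-instruction slot with an eip that does
 * not match the original instruction stream.
 */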
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
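
/*
 * On i386, esp is not saved for same-privilege traps, so &regs->esp is
 * itself the top of the pre-trap stack.  At the probed function's entry
 * point that word is the caller's return address; overwriting it makes the
 * function "return" into kretprobe_trampoline, which recovers the real
 * return address from the saved kretprobe_instance.
 */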
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
	unsigned pre_preempt_count = preempt_count();
#endif /* CONFIG_PREEMPT */

	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobe variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no further
				 * handling of this interrupt is appropriate
				 */
				regs->eip -= sizeof(kprobe_opcode_t);
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable == 1 &&
#ifdef CONFIG_PREEMPT
	    !(pre_preempt_count) && /*
				     * This enables the booster when the
				     * direct execution path is not preempted.
				     */
#endif /* CONFIG_PREEMPT */
	    !p->post_handler && !p->break_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->eip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
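
/*
 * A non-zero return from kprobe_handler() tells kprobe_exceptions_notify()
 * below that the int3 was ours and has been handled (NOTIFY_STOP); zero
 * lets the rest of the kernel's die-notifier processing see the trap.
 */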
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_handler() runs, calling the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	pushf\n"
			/* skip cs, eip, orig_eax, es, ds */
			"	subl $20, %esp\n"
			"	pushl %eax\n"
			"	pushl %ebp\n"
			"	pushl %edi\n"
			"	pushl %esi\n"
			"	pushl %edx\n"
			"	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* move eflags to cs */
			"	movl 48(%esp), %edx\n"
			"	movl %edx, 44(%esp)\n"
			/* save true return address on eflags */
			"	movl %eax, 48(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
			"	popl %edx\n"
			"	popl %esi\n"
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* skip eip, orig_eax, es, ds */
			"	addl $16, %esp\n"
			"	popf\n"
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	regs->eflags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/*
			 * Fix return addr; eip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->eip > copy_eip) &&
		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if
			 * they jump back to the correct address.
			 */
			set_jmp_op((void *)regs->eip,
				   (void *)orig_eip + (regs->eip - copy_eip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
	return;
}
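
/*
 * The set_jmp_op() call above is the "boost" installation: it writes a
 * relative jmp immediately after the copied instruction in the slot, so
 * later hits can run the copy and jump straight back to the original
 * instruction stream without the trap1 single-step round trip.
 */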
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the eip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->eip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_eflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
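
/*
 * This notifier is hooked into the kernel's die notifier chain (the generic
 * kprobes code registers it via register_die_notifier()), which is how the
 * int3, debug, and fault events reach the handlers above.
 */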
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
			MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					    struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
int __init arch_init_kprobes(void)
{
	return 0;
}