/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

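/*
 * Illustrative usage sketch (editorial, not part of the original file):
 * a client of the kprobes API implemented here typically registers a
 * probe from a module roughly like this, where "do_fork" is just a
 * hypothetical target symbol.  Returning 0 from the pre-handler means
 * "continue with normal single-step processing":
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "hit %p, ip=%lx\n", p->addr, regs->ip);
 *		return 0;
 *	}
 *	static struct kprobe kp = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = pre,
 *	};
 *	...
 *	register_kprobe(&kp);
 */
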
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

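/*
 * Worked example (editorial): W(0x40, 1, 1, ..., 1) ORs sixteen bits
 * into positions 0x0-0xf and then shifts the group left by
 * (0x40 % 32) == 0, while the adjacent W(0x50, ...) row is shifted by
 * (0x50 % 32) == 16; the two rows together fill one u32 entry of the
 * bitmap below, which test_bit() then indexes by raw opcode value.
 */
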
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __attribute__((packed)) *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

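/*
 * Worked example (editorial): for from == 0x1000 and to == 0x2000,
 * raddr becomes 0x2000 - (0x1000 + 5) == 0xffb, i.e. the displacement
 * is always measured from the end of the 5-byte relative instruction.
 */
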
/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
static void __kprobes synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}

/*
 * Check for the REX prefix, which can only exist on X86_64;
 * on X86_32 this always returns 0.
 */
static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
{
#ifdef CONFIG_X86_64
	if ((*insn & 0xf0) == 0x40)
		return 1;
#endif
	return 0;
}

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

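/*
 * Illustrative walk-through (editorial, not from the original source):
 * for the sequence "f0 01 08" (lock add %ecx, (%eax)), the first pass
 * sees 0xf0, matches the lock/rep(ne) prefix test above and retries on
 * the next byte; 0x01 then falls into the default case and is reported
 * boostable.  A CS segment override (0x2e), by contrast, makes the
 * final test return 0.
 */
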
/* Recover the probed instruction at addr for further analysis. */
static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	if (!kp)
		return -EINVAL;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement
	 * of that instruction.  In that case, we can't recover the
	 * instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which was overwritten by int3.  Since
	 * the instruction at kp->addr is not modified by kprobes except
	 * for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return 0;
}

/* Dummy buffers for kallsyms_lookup */
static char __dummy_buf[KSYM_NAME_LEN];

/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	int ret;
	unsigned long addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				/*
				 * Another debugging subsystem might insert
				 * this breakpoint. In that case, we can't
				 * recover it.
				 */
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		addr += insn.length;
	}

	return (addr == paddr);
}

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

#ifdef CONFIG_X86_64
	/*
	 * On X86_64, 0x40-0x4f are REX prefixes, so we need to look
	 * at the next byte instead -- but of course not recurse infinitely.
	 */
	if (is_REX_prefix(insn))
		return is_IF_modifier(++insn);
#endif
	return 0;
}

/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode (the fixup is only applicable
 * to 64-bit x86).  Returns the length of the copied instruction, or 0
 * if the instruction could not be recovered.
 */
static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
{
	struct insn insn;
	int ret;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	kernel_insn_init(&insn, src);
	if (recover) {
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf,
							 (unsigned long)src);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
	}
	insn_get_length(&insn);
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value -
			  (u8 *) dest;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}

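/*
 * Worked example (editorial): suppose the original instruction at
 * src == 0xffffffff81000000 uses a %rip-relative displacement of
 * 0x1000 and its copy lives at dest == 0xffffffff81200000.  Then
 * newdisp == src + 0x1000 - dest == -0x1ff000, so the copy still
 * addresses the same absolute location; the BUG_ON() above guards the
 * case where the slot is too far away for a signed 32-bit field.
 */
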
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	/*
	 * Copy the instruction without recovering an int3, because if one
	 * is there, it was put by another subsystem.
	 */
	__copy_instruction(p->ainsn.insn, p->addr, 0);

	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}

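/*
 * Illustration (editorial, not from the original source): if the probed
 * function was entered via "call foo", the top of stack at function
 * entry holds the caller's resume address.  The function above saves
 * that value in ri->ret_addr and overwrites the stack slot, so "ret"
 * lands in kretprobe_trampoline instead of the caller;
 * trampoline_handler() below restores the real address afterwards.
 */
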
#ifdef CONFIG_OPTPROBES
static int __kprobes setup_detour_execution(struct kprobe *p,
					    struct pt_regs *regs,
					    int reenter);
#else
#define setup_detour_execution(p, regs, reenter) (0)
#endif

static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}

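/*
 * Editorial note: the boost path above skips single-stepping entirely.
 * A boosted slot holds the copied instruction followed by a synthesized
 * reljump back into the original instruction stream (added in
 * resume_execution()), so the probe costs one int3 trap instead of an
 * int3 plus a debug trap.
 */
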
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	}

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.  We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}

#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax. */		\
	"	subq $24, %rsp\n"		\
	"	pushq %rdi\n"			\
	"	pushq %rsi\n"			\
	"	pushq %rdx\n"			\
	"	pushq %rcx\n"			\
	"	pushq %rax\n"			\
	"	pushq %r8\n"			\
	"	pushq %r9\n"			\
	"	pushq %r10\n"			\
	"	pushq %r11\n"			\
	"	pushq %rbx\n"			\
	"	pushq %rbp\n"			\
	"	pushq %r12\n"			\
	"	pushq %r13\n"			\
	"	pushq %r14\n"			\
	"	pushq %r15\n"
#define RESTORE_REGS_STRING			\
	"	popq %r15\n"			\
	"	popq %r14\n"			\
	"	popq %r13\n"			\
	"	popq %r12\n"			\
	"	popq %rbp\n"			\
	"	popq %rbx\n"			\
	"	popq %r11\n"			\
	"	popq %r10\n"			\
	"	popq %r9\n"			\
	"	popq %r8\n"			\
	"	popq %rax\n"			\
	"	popq %rcx\n"			\
	"	popq %rdx\n"			\
	"	popq %rsi\n"			\
	"	popq %rdi\n"			\
	/* Skip orig_ax, ip, cs */		\
	"	addq $24, %rsp\n"
#else
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax and gs. */	\
	"	subl $16, %esp\n"		\
	"	pushl %fs\n"			\
	"	pushl %es\n"			\
	"	pushl %ds\n"			\
	"	pushl %eax\n"			\
	"	pushl %ebp\n"			\
	"	pushl %edi\n"			\
	"	pushl %esi\n"			\
	"	pushl %edx\n"			\
	"	pushl %ecx\n"			\
	"	pushl %ebx\n"
#define RESTORE_REGS_STRING			\
	"	popl %ebx\n"			\
	"	popl %ecx\n"			\
	"	popl %edx\n"			\
	"	popl %esi\n"			\
	"	popl %edi\n"			\
	"	popl %ebp\n"			\
	"	popl %eax\n"			\
	/* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
	"	addl $24, %esp\n"
#endif

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}

/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

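/*
 * Illustration (editorial, not from the original source): if probed
 * function f() calls probed function g(), the instance list for this
 * task holds [g-instance, f-instance] (LIFO).  When g() returns, the
 * loop above handles the g-instance, finds a ret_addr that is not the
 * trampoline (g's real return into f), and stops before touching the
 * f-instance, which is still live deeper in the stack.
 */
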
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (is_REX_prefix(insn))
		insn++;

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable.
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct.  And this is boostable.
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * The copied instruction can be executed directly
			 * if it jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}

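/*
 * Worked example (editorial): suppose "call foo" at orig_ip 0x1000 was
 * copied to copy_ip 0x2000 and single-stepped there.  The pushed return
 * address is 0x2005 (copy_ip plus the 5-byte call), so the 0xe8 case
 * above rewrites it to 0x1000 + (0x2005 - 0x2000) == 0x1005, and the
 * final "regs->ip += orig_ip - copy_ip" moves ip back into the original
 * instruction stream the same way.
 */
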
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this was not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that the ip points back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space, e.g. by
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed to by args->err)
			 * to denote completion of processing.
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

#ifdef CONFIG_OPTPROBES

/* Insert a call instruction at address 'from', which calls address 'to'. */
static void __kprobes synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
					  unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

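/*
 * Encoding note (editorial): the bytes emitted above are 0x48 0xbf on
 * x86-64, i.e. "movabs $imm64, %rdi", and 0xb8 ("mov $imm32, %eax") on
 * 32-bit, where the kernel's regparm(3) convention makes %eax the first
 * argument register; either way the optimized_kprobe pointer lands in
 * the first argument seen by optimized_callback().
 */
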
void __kprobes kprobes_optinsn_template_holder(void)
{
	asm volatile (
			".global optprobe_template_entry\n"
			"optprobe_template_entry: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val: \n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call: \n"
			ASM_NOP5
			/* Move flags to rsp */
			"	movq 144(%rsp), %rdx\n"
			"	movq %rdx, 152(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addq $8, %rsp\n"
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val: \n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call: \n"
			ASM_NOP5
			RESTORE_REGS_STRING
			"	addl $4, %esp\n"	/* skip cs */
			"	popf\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end: \n");
}

#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)

/* Optimized kprobe callback function: called from optinsn */
static void __kprobes optimized_callback(struct optimized_kprobe *op,
					 struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__get_cpu_var(current_kprobe) = &op->kp;
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__get_cpu_var(current_kprobe) = NULL;
	}
	preempt_enable_no_resched();
}

static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len, 1);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is an indirect jump */
static int __kprobes insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int __kprobes can_optimize(unsigned long paddr)
{
	int ret;
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	/* Dummy buffers for lookup_symbol_attrs */
	static char __dummy_buf[KSYM_NAME_LEN];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in it.
			 */
			return 0;
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check no instruction jumps into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check if the optimized_kprobe can actually be optimized. */
int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check if the addr is within the optimized instructions. */
int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
					   unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free the optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions that the jump will replace.
 * The target instructions MUST be relocatable (checked inside).
 */
int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff)
		return -ERANGE;

	buf = (u8 *)op->optinsn.insn;

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy arch-dep-instance from template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}

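/*
 * Resulting slot layout (editorial sketch):
 *
 *	buf + 0:                     template (save regs, set arg1,
 *	                             call optimized_callback, restore)
 *	buf + TMPL_END_IDX:          relocated copy of the probed insns
 *	buf + TMPL_END_IDX + size:   reljump back to kp.addr + size
 */
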
/* Replace a breakpoint (int3) with a relative jump. */
int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
{
	unsigned char jmp_code[RELATIVEJUMP_SIZE];
	s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

	/* Backup instructions which will be replaced by jump address */
	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
	       RELATIVE_ADDR_SIZE);

	jmp_code[0] = RELATIVEJUMP_OPCODE;
	*(s32 *)(&jmp_code[1]) = rel;

	/*
	 * text_poke_smp doesn't support NMI/MCE code modifying.
	 * However, since kprobes itself also doesn't support NMI/MCE
	 * code probing, it's not a problem.
	 */
	text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE);
	return 0;
}

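/*
 * Worked example (editorial): for kp.addr == 0xffffffff81000000 and a
 * slot at 0xffffffff81200000, rel == 0x200000 - 5 == 0x1ffffb and
 * jmp_code becomes "e9 fb ff 1f 00" -- a 5-byte near jump whose
 * displacement, as always, is measured from the end of the jump
 * instruction itself.
 */
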
/* Replace a relative jump with a breakpoint (int3). */
void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 buf[RELATIVEJUMP_SIZE];

	/* Set int3 to the first byte for kprobes */
	buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
}

static int __kprobes setup_detour_execution(struct kprobe *p,
					    struct pt_regs *regs,
					    int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
#endif /* CONFIG_OPTPROBES */

int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}