/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <kenistoj@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
void jprobe_return_end(void);
static void __kprobes arch_copy_kprobe(struct kprobe *p);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			      doesn't switch kernel stack.*/
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
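/*
 * Worked example (illustrative values, not from a real probe): with
 * from == 0x1000 and to == 0x2000, set_jmp_op() lays down the 0xe9
 * opcode followed by raddr == 0x2000 - (0x1000 + 5) == 0xffb, because
 * %rip has already advanced past the 5-byte jmp when the relative
 * displacement is applied.
 */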
/*
 * Returns non-zero if opcode is boostable.
 * RIP relative instructions are adjusted at copying time.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	/*
	 * Undefined/reserved opcodes, conditional jump, Opcode Extension
	 * Groups, and some special opcodes can not boost.
	 */
	static const unsigned long twobyte_is_boostable[256 / 64] = {
		/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f      */
		/*      ----------------------------------------------      */
		W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0)|/* 00 */
		W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 10 */
		W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 20 */
		W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),/* 30 */
		W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)|/* 40 */
		W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 50 */
		W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1)|/* 60 */
		W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1),/* 70 */
		W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 80 */
		W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)|/* 90 */
		W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* a0 */
		W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1),/* b0 */
		W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1)|/* c0 */
		W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* d0 */
		W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* e0 */
		W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
		/*      ----------------------------------------------      */
		/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f      */
	};
#undef W
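	/*
	 * Each W() row above packs 16 per-opcode flags into one bit each,
	 * so the four array entries cover all 256 two-byte opcodes.
	 * Illustrative lookup: test_bit(0x44, twobyte_is_boostable) is 1
	 * because the cmov family (0x0f 0x40..0x4f) row is all ones, while
	 * the conditional jumps in the 0x80 row are all zero and are never
	 * boosted.
	 */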
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;
retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);
	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes, twobyte_is_boostable);
	}
	switch (opcode & 0xf0) {
	case 0x40:
		goto retry; /* REX prefix is boostable */
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
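/*
 * Illustrative walk-through: "push %rbp" (opcode 0x55) matches no case
 * label above and lands in default: -- it is not a segment override,
 * CS override, or far call, so can_boost() returns non-zero and the
 * probe may later execute the copied instruction directly.
 */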
/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
		return 1;
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86_64. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		return -ENOMEM;
	}
	arch_copy_kprobe(p);
	return 0;
}
/*
 * Determine if the instruction uses the %rip-relative addressing mode.
 * If it does, return the address of the 32-bit displacement word.
 * If not, return null.
 */
static s32 __kprobes *is_riprel(u8 *insn)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	static const u64 onebyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
	static const u64 twobyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	int need_modrm;
	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66: case 0x67: case 0x2e: case 0x3e:
		case 0x26: case 0x64: case 0x65: case 0x36:
		case 0xf0: case 0xf3: case 0xf2:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {	/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn, twobyte_has_modrm);
	} else {		/* One-byte opcode.  */
		need_modrm = test_bit(*insn, onebyte_has_modrm);
	}
	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			return (s32 *) ++insn;
		}
	}

	/* No %rip-relative addressing mode here.  */
	return NULL;
}
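/*
 * Illustrative encoding: "mov 0x1234(%rip),%rax" is 48 8b 05 34 12 00 00
 * -- REX.W, opcode 0x8b, then ModRM 0x05 (mod == 00, r/m == 101, the
 * %rip+disp32 form), so is_riprel() returns the address of the
 * 34 12 00 00 displacement bytes within the instruction.
 */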
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	s32 *ripdisp;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
	ripdisp = is_riprel(p->ainsn.insn);
	if (ripdisp) {
		/*
		 * The copied instruction uses the %rip-relative
		 * addressing mode.  Adjust the displacement for the
		 * difference between the original location of this
		 * instruction and the location of the copy that will
		 * actually be run.  The tricky bit here is making sure
		 * that the sign extension happens correctly in this
		 * calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the
		 * %rip value and yield the same 64-bit result that the
		 * sign-extension of the original signed 32-bit
		 * displacement would have given.
		 */
		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
		*ripdisp = disp;
	}
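	/*
	 * Worked example (illustrative addresses): for a probed instruction
	 * at 0xffffffff80200000 with disp32 0x100 whose copy lives at
	 * 0xffffffff80500000, the copy's displacement becomes
	 * 0x100 - 0x300000 == -0x2fff00, so copy address + new displacement
	 * still reaches the original target.
	 */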
	if (can_boost(p->addr)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	p->opcode = *p->addr;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
		= (regs->flags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_rflags &= ~IF_MASK;
}
static __always_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
}
static __always_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;
	/*single step inline if the instruction is an int3*/
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)regs->sp;

	ri->ret_addr = (kprobe_opcode_t *) *sara;
	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
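/*
 * After this, the stack frame still looks like an ordinary call, but the
 * saved return address points at kretprobe_trampoline: the probed function
 * "returns" into the trampoline, trampoline_probe_handler() takes over,
 * and the real return address is preserved in ri->ret_addr.
 */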
int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->flags &= ~TF_MASK;
				regs->flags |= kcb->kprobe_saved_rflags;
				goto no_kprobe;
			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
				/* TODO: Provide re-entrancy from
				 * post_kprobes_handler() and avoid exception
				 * stack corruption while single-stepping on
				 * the instruction of the new probe.
				 */
				arch_disarm_kprobe(p);
				regs->ip = (unsigned long)p->addr;
				reset_current_kprobe();
				ret = 1;
			} else {
				/* We have reentered the kprobe_handler(), since
				 * another probe was hit while within the
				 * handler. We here save the original kprobe
				 * variables and just single step on instruction
				 * of the new probe without calling any user
				 * handlers.
				 */
				save_previous_kprobe(kcb);
				set_current_kprobe(p, regs, kcb);
				kprobes_inc_nmissed_count(p);
				prepare_singlestep(p, regs);
				kcb->kprobe_status = KPROBE_REENTER;
				return 1;
			}
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				regs->ip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}
	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->ip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}
	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;
ss_probe:
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile (  ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"nop\n");
}
/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);
		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->ip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)regs->sp;
	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
	unsigned long orig_rip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;
	/*skip the REX prefix*/
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;

	regs->flags &= ~TF_MASK;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_rflags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_rip + (*tos - copy_rip);
		break;
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; ip is correct. */
			*tos = orig_rip + (*tos - copy_rip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* ip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}
	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_rip) &&
		    (regs->ip - copy_rip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if it
			 * jumps back to correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_rip + (regs->ip - copy_rip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}
	regs->ip = orig_rip + (regs->ip - copy_rip);

no_change:
	restore_btf();
}
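/*
 * Worked example (illustrative): if the copy at copy_rip == 0xf000 was
 * single-stepped and the trap left regs->ip == 0xf003, the probed
 * instruction was 3 bytes long, so execution resumes at orig_rip + 3,
 * the address of the instruction following the probepoint.
 */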
int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;
	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}
	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_rflags;
	trace_hardirqs_fixup_flags(regs->flags);
	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();
	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & TF_MASK)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *fixup;
	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_rflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);
		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		fixup = search_exception_tables(regs->ip);
		if (fixup) {
			regs->ip = fixup->fixup;
			return 1;
		}

		/*
		 * fixup() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_rsp = (long *) regs->sp;
	addr = (unsigned long)(kcb->jprobe_saved_rsp);
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
			MIN_STACK_SIZE(addr));
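	/*
	 * MIN_STACK_SIZE() (from asm/kprobes.h) copies at most MAX_STACK_SIZE
	 * bytes, and never more than remain between addr and the high end of
	 * the current kernel stack, so the save/restore cannot run off the
	 * stack.
	 */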
	regs->flags &= ~IF_MASK;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchg   %%rbx,%%rsp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_rsp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if ((unsigned long *)regs->sp != kcb->jprobe_saved_rsp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk("current sp %p does not match saved sp %p\n",
			       (long *)regs->sp, kcb->jprobe_saved_rsp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}