[PATCH] Switch Kprobes inline functions to __kprobes for x86_64
/*
 *  Kernel Probes (KProbes)
 *  arch/x86_64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <kenistoj@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>

void jprobe_return_end(void);
static void __kprobes arch_copy_kprobe(struct kprobe *p);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
		return 1;
	return 0;
}
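/*
 * Example (for illustration; byte values per the x86-64 encoding): the
 * 64-bit "iretq" is encoded as a REX.W prefix followed by the iret
 * opcode, i.e.
 *
 *	0x48 0xcf	iretq
 *
 * which is why the check above skips one 0x40..0x4f prefix byte before
 * testing for 0xcf.
 */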
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86_64. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		return -ENOMEM;
	}
	arch_copy_kprobe(p);
	return 0;
}
/*
 * Determine if the instruction uses the %rip-relative addressing mode.
 * If it does, return the address of the 32-bit displacement word.
 * If not, return null.
 */
static s32 __kprobes *is_riprel(u8 *insn)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)	      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	static const u64 onebyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
	static const u64 twobyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66:
		case 0x67:
		case 0x2e:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x36:
		case 0xf0:
		case 0xf3:
		case 0xf2:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {	/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn, twobyte_has_modrm);
	} else {		/* One-byte opcode.  */
		need_modrm = test_bit(*insn, onebyte_has_modrm);
	}

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			return (s32 *) ++insn;
		}
	}

	/* No %rip-relative addressing mode here.  */
	return NULL;
}
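/*
 * Example of what is_riprel() detects: "mov 0x12345678(%rip),%eax" is
 * encoded as 8b 05 78 56 34 12.  The ModRM byte 0x05 has mod == 00 and
 * rm == 101, so the function returns a pointer to the disp32 bytes
 * (78 56 34 12) that follow it.
 */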
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	s32 *ripdisp;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
	ripdisp = is_riprel(p->ainsn.insn);
	if (ripdisp) {
		/*
		 * The copied instruction uses the %rip-relative
		 * addressing mode.  Adjust the displacement for the
		 * difference between the original location of this
		 * instruction and the location of the copy that will
		 * actually be run.  The tricky bit here is making sure
		 * that the sign extension happens correctly in this
		 * calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the
		 * %rip value and yield the same 64-bit result that the
		 * sign-extension of the original signed 32-bit
		 * displacement would have given.
		 */
		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
		*ripdisp = disp;
	}
	p->opcode = *p->addr;
}
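/*
 * Worked example (hypothetical addresses): with the original instruction
 * at A = 0xffffffff80200000, its copy at C = 0xffffffff80300000, and an
 * original displacement D = 0x1000, the new displacement is
 * D + (A - C) = 0x1000 - 0x100000.  Since %rip-relative addressing adds
 * the displacement to the address of the *next* instruction, and that
 * address shifts by exactly C - A for the copy, the copy still
 * references the same absolute target.
 */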
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	mutex_unlock(&kprobe_mutex);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_rflags &= ~IF_MASK;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->rip = (unsigned long)p->addr;
	else
		regs->rip = (unsigned long)p->ainsn.insn;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)regs->rsp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_rflags;
				goto no_kprobe;
			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
				/* TODO: Provide re-entrancy from
				 * post_kprobes_handler() and avoid exception
				 * stack corruption while single-stepping on
				 * the instruction of the new probe.
				 */
				arch_disarm_kprobe(p);
				regs->rip = (unsigned long)p->addr;
				reset_current_kprobe();
				ret = 1;
			} else {
				/* We have reentered the kprobe_handler(),
				 * since another probe was hit while within
				 * the handler.  Here we save the original
				 * kprobe variables and just single-step on
				 * the instruction of the new probe without
				 * calling any user handlers.
				 */
				save_previous_kprobe(kcb);
				set_current_kprobe(p, regs, kcb);
				kprobes_inc_nmissed_count(p);
				prepare_singlestep(p, regs);
				kcb->kprobe_status = KPROBE_REENTER;
				return 1;
			}
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate.
			 */
				regs->rip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->rip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
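/*
 * To recap the flow implemented by the handlers in this file: the
 * planted int3 raises DIE_INT3, which kprobe_exceptions_notify() (below)
 * routes to kprobe_handler(); the pre_handler runs, the copied
 * instruction is single-stepped with TF set, and the resulting debug
 * trap (DIE_DEBUG) is routed to post_kprobe_handler().
 */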
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile (  ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"nop\n");
}
/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->rip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new rip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)regs->rsp;
	unsigned long next_rip = 0;
	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
	unsigned long orig_rip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;

	switch (*insn) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_rflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
		regs->eflags &= ~TF_MASK;
		/* rip is already adjusted, no more changes required */
		return;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_rip + (*tos - copy_rip);
		break;
	case 0xff:
		if ((*insn & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; rip is correct. */
			next_rip = regs->rip;
			*tos = orig_rip + (*tos - copy_rip);
		} else if (((*insn & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((*insn & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* rip is correct. */
			next_rip = regs->rip;
		}
		break;
	case 0xea:		/* jmp absolute -- rip is correct */
		next_rip = regs->rip;
		break;
	default:
		break;
	}

	regs->eflags &= ~TF_MASK;
	if (next_rip) {
		regs->rip = next_rip;
	} else {
		regs->rip = orig_rip + (regs->rip - copy_rip);
	}
}
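/*
 * Example of fixup 2) above (hypothetical addresses): a 5-byte
 * "call rel32" (0xe8) probed at orig_rip = 0x1000 is single-stepped at
 * copy_rip = 0x9000, so the CPU pushes 0x9005 as the return address.
 * *tos = orig_rip + (*tos - copy_rip) rewrites it to 0x1005, the
 * address following the original instruction.
 */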
int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_rflags;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is single-stepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *fixup;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the rip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->rip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_rflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault.  This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return 1;
		}

		/*
		 * fixup() could not handle it; let do_page_fault()
		 * fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_rsp = (long *) regs->rsp;
	addr = (unsigned long)(kcb->jprobe_saved_rsp);
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->rip = (unsigned long)(jp->entry);
	return 1;
}
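/*
 * For reference, a jprobe that reaches this pre-handler is set up
 * roughly as follows (hypothetical sketch; the jumper must mirror the
 * probed function's signature and must end in jprobe_return()):
 *
 *	static long my_jumper(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk("args: %lx %lx\n", arg0, arg1);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_jumper,
 *	};
 *
 *	my_jp.kp.addr = (kprobe_opcode_t *) probed_function_address;
 *	register_jprobe(&my_jp);
 */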
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("	xchg	%%rbx,%%rsp	\n"
		      "	int3			\n"
		      "	.globl jprobe_return_end\n"
		      "	jprobe_return_end:	\n"
		      "	nop			\n"::"b"
		      (kcb->jprobe_saved_rsp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->rip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if ((long *)regs->rsp != kcb->jprobe_saved_rsp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_rsp,
					 struct pt_regs, rsp);
			printk("current rsp %p does not match saved rsp %p\n",
			       (long *)regs->rsp, kcb->jprobe_saved_rsp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}