[PATCH] x86: kprobes-booster
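This patch introduces a "booster" for i386 kprobes. After a probed
instruction has been single-stepped once from its out-of-line copy,
instructions that can_boost() classifies as safe are executed directly
from that copy on subsequent hits, with a relative jump (written by
set_jmp_op()) returning control to the original instruction stream.
For probes with no post_handler or break_handler this removes the
second (debug) trap from every hit after the first.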
/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* insert a jmp code */
static inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
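
/*
 * Worked example (illustrative values): with from == 0x1000 and
 * to == 0x2000, raddr == 0x2000 - (0x1000 + 5) == 0xffb, so the bytes
 * stored at "from" are e9 fb 0f 00 00 -- a 5-byte "jmp rel32"
 * (assuming RELATIVEJUMP_INSTRUCTION is the 0xe9 jmp opcode).
 */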

/*
 * returns non-zero if opcodes can be boosted.
 */
static inline int can_boost(kprobe_opcode_t opcode)
{
	switch (opcode & 0xf0) {
	case 0x70:
		return 0;	/* can't boost conditional jump */
	case 0x90:
		/* can't boost call and pushf */
		return opcode != 0x9a && opcode != 0x9c;
	case 0xc0:
		/* can't boost undefined opcodes and software interrupts */
		return (0xc1 < opcode && opcode < 0xc6) ||
			(0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and (maybe) jmps */
		return (0xe3 < opcode && opcode != 0xe8);
	case 0xf0:
		/* clear-flag and set-flag instructions can be boosted */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* currently, can't boost two-byte opcodes */
		return opcode != 0x0f;
	}
}
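
/*
 * Example: the one-byte opcode 0x89 (mov r/m32,r32) lands in the
 * default case above and is considered boostable, while every
 * 0x0f-prefixed two-byte opcode is conservatively rejected.
 */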

/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static inline int is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->opcode)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	return 0;
}
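
/*
 * Note: ainsn.boostable tracks three states:
 *   -1 -- this instruction must always be single-stepped;
 *    0 -- boost candidate: resume_execution() decides after the first
 *         single step whether direct execution is safe;
 *    1 -- boosted: later hits run the out-of-line copy directly.
 */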

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	mutex_unlock(&kprobe_mutex);
}

static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}

static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}

static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)p->ainsn.insn;
}
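
/*
 * With TF set, the stepped copy raises a debug trap, which reaches
 * kprobe_exceptions_notify() as DIE_DEBUG and is handled by
 * post_kprobe_handler() and resume_execution() below.
 */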

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	unsigned long *lp;
	struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
	unsigned pre_preempt_count = preempt_count();
#endif /* CONFIG_PREEMPT */

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check if the application is using LDT entry for its code segment and
	 * calculate the address by reading the base address from the LDT entry.
	 */
	if ((regs->xcs & 4) && (current->mm)) {
		lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8)
					+ (char *) current->mm->context.ldt);
		addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip -
						sizeof(kprobe_opcode_t));
	} else {
		addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
	}
	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (regs->eflags & VM_MASK) {
				/* We are in virtual-8086 mode. Return 0 */
				goto no_kprobe;
			}
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				regs->eip -= sizeof(kprobe_opcode_t);
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (regs->eflags & VM_MASK) {
			/* We are in virtual-8086 mode. Return 0 */
			goto no_kprobe;
		}

		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

	if (p->ainsn.boostable == 1 &&
#ifdef CONFIG_PREEMPT
	    !(pre_preempt_count) &&	/*
					 * This enables booster only when the
					 * direct execution path isn't preempted.
					 */
#endif /* CONFIG_PREEMPT */
	    !p->post_handler && !p->break_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->eip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
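
	/*
	 * Note: a boosted hit returns above without arming a single step.
	 * The out-of-line copy either ends in an instruction that
	 * transfers control by itself (e.g. ret) or in the jump that
	 * set_jmp_op() appended, so execution rejoins the original
	 * instruction stream without another trap.
	 */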

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	nop\n");
}

/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->eip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	regs->eflags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/*
			 * Fix return addr; eip is correct.
			 * But this is not boostable.
			 */
			*tos = orig_eip + (*tos - copy_eip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->eip > copy_eip) &&
		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->eip,
				   (void *)orig_eip + (regs->eip - copy_eip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
	return;
}
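
/*
 * Note on the boostable == 0 path above: set_jmp_op() needs 5 bytes
 * after the stepped instruction for the return jump, so boosting is
 * enabled only when (regs->eip - copy_eip) + 5 -- instruction length
 * plus the jump -- still fits in the MAX_INSN_SIZE slot.
 */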

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->eflags |= kcb->kprobe_old_eflags;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("	xchgl	%%ebx,%%esp	\n"
		      "	int3			\n"
		      "	.globl jprobe_return_end	\n"
		      "	jprobe_return_end:	\n"
		      "	nop			\n"
		      ::"b" (kcb->jprobe_saved_esp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					    struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
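
/*
 * Usage sketch (illustrative, not part of this patch): the booster is
 * exercised by any kprobe registered without a post_handler or
 * break_handler, along these lines:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		return 0;	(let the probed instruction run)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre,
 *	};
 *
 *	my_kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
 *	register_kprobe(&my_kp);
 *
 * "do_fork" and the lookup via kallsyms_lookup_name() are only
 * examples; any resolvable kernel text address works.  After the first
 * hit has been single-stepped once, later hits on my_kp run boosted.
 */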