/* MN10300 Kernel probes implementation
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public Licence for more details.
 *
 * You should have received a copy of the GNU General Public Licence
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
20 #include <linux/kprobes.h>
21 #include <linux/ptrace.h>
22 #include <linux/spinlock.h>
23 #include <linux/preempt.h>
24 #include <linux/kdebug.h>
25 #include <asm/cacheflush.h>
27 struct kretprobe_blackpoint kretprobe_blacklist
[] = { { NULL
, NULL
} };
28 const int kretprobe_blacklist_size
= ARRAY_SIZE(kretprobe_blacklist
);
30 /* kprobe_status settings */
31 #define KPROBE_HIT_ACTIVE 0x00000001
32 #define KPROBE_HIT_SS 0x00000002
34 static struct kprobe
*current_kprobe
;
35 static unsigned long current_kprobe_orig_pc
;
36 static unsigned long current_kprobe_next_pc
;
37 static int current_kprobe_ss_flags
;
38 static unsigned long kprobe_status
;
39 static kprobe_opcode_t current_kprobe_ss_buf
[MAX_INSN_SIZE
+ 2];
40 static unsigned long current_kprobe_bp_addr
;
42 DEFINE_PER_CPU(struct kprobe
*, current_kprobe
) = NULL
;
45 /* singlestep flag bits */
46 #define SINGLESTEP_BRANCH 1
47 #define SINGLESTEP_PCREL 2
/* Read instruction-stream bytes one at a time; the stepped instruction is
 * presumably not guaranteed to be aligned for multi-byte loads — the
 * byte-wise form is safe either way.  Wrapped in do { } while (0) so the
 * macros behave as single statements. */
#define READ_BYTE(p, valp) \
	do { *(u8 *)(valp) = *(u8 *)(p); } while (0)

#define READ_WORD16(p, valp)					\
	do {							\
		READ_BYTE((p), (valp));				\
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);	\
	} while (0)

#define READ_WORD32(p, valp)					\
	do {							\
		READ_BYTE((p), (valp));				\
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);	\
		READ_BYTE((u8 *)(p) + 2, (u8 *)(valp) + 2);	\
		READ_BYTE((u8 *)(p) + 3, (u8 *)(valp) + 3);	\
	} while (0)
67 static const u8 mn10300_insn_sizes
[256] =
69 /* 1 2 3 4 5 6 7 8 9 a b c d e f */
70 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, /* 0 */
71 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */
72 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, /* 2 */
73 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, /* 3 */
74 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, /* 4 */
75 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, /* 5 */
76 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */
77 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */
78 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 8 */
79 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 9 */
80 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* a */
81 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* b */
82 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, /* c */
83 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */
84 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */
85 0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1 /* f */
104 static const u16 cond_table
[] = {
106 /* 0 0 0 0 */ (NE
| NC
| CC
| VC
| GE
| GT
| HI
),
107 /* 0 0 0 1 */ (EQ
| NC
| CC
| VC
| GE
| LE
| LS
),
108 /* 0 0 1 0 */ (NE
| NS
| CC
| VC
| LT
| LE
| HI
),
109 /* 0 0 1 1 */ (EQ
| NS
| CC
| VC
| LT
| LE
| LS
),
110 /* 0 1 0 0 */ (NE
| NC
| CS
| VC
| GE
| GT
| LS
),
111 /* 0 1 0 1 */ (EQ
| NC
| CS
| VC
| GE
| LE
| LS
),
112 /* 0 1 1 0 */ (NE
| NS
| CS
| VC
| LT
| LE
| LS
),
113 /* 0 1 1 1 */ (EQ
| NS
| CS
| VC
| LT
| LE
| LS
),
114 /* 1 0 0 0 */ (NE
| NC
| CC
| VS
| LT
| LE
| HI
),
115 /* 1 0 0 1 */ (EQ
| NC
| CC
| VS
| LT
| LE
| LS
),
116 /* 1 0 1 0 */ (NE
| NS
| CC
| VS
| GE
| GT
| HI
),
117 /* 1 0 1 1 */ (EQ
| NS
| CC
| VS
| GE
| LE
| LS
),
118 /* 1 1 0 0 */ (NE
| NC
| CS
| VS
| LT
| LE
| LS
),
119 /* 1 1 0 1 */ (EQ
| NC
| CS
| VS
| LT
| LE
| LS
),
120 /* 1 1 1 0 */ (NE
| NS
| CS
| VS
| GE
| GT
| LS
),
121 /* 1 1 1 1 */ (EQ
| NS
| CS
| VS
| GE
| LE
| LS
),
125 * Calculate what the PC will be after executing next instruction
127 static unsigned find_nextpc(struct pt_regs
*regs
, int *flags
)
133 u8 opc
, *pc
, *sp
, *next
;
136 *flags
= SINGLESTEP_PCREL
;
138 pc
= (u8
*) regs
->pc
;
139 sp
= (u8
*) (regs
+ 1);
142 size
= mn10300_insn_sizes
[opc
];
150 if (cond_table
[regs
->epsw
& 0xf] & (1 << (opc
& 0xf)))
153 *flags
|= SINGLESTEP_BRANCH
;
156 /* JMP (d16,PC) or CALL (d16,PC) */
159 READ_WORD16(pc
+ 1, &x16
);
161 *flags
|= SINGLESTEP_BRANCH
;
164 /* JMP (d32,PC) or CALL (d32,PC) */
167 READ_WORD32(pc
+ 1, &x32
);
169 *flags
|= SINGLESTEP_BRANCH
;
174 next
= (u8
*)regs
->mdr
;
175 *flags
&= ~SINGLESTEP_PCREL
;
176 *flags
|= SINGLESTEP_BRANCH
;
182 READ_WORD32(sp
, &x32
);
184 *flags
&= ~SINGLESTEP_PCREL
;
185 *flags
|= SINGLESTEP_BRANCH
;
191 if (opc
>= 0xf0 && opc
<= 0xf7) {
192 /* JMP (An) / CALLS (An) */
195 next
= (u8
*)regs
->a0
;
198 next
= (u8
*)regs
->a1
;
201 next
= (u8
*)regs
->a2
;
204 next
= (u8
*)regs
->a3
;
207 *flags
&= ~SINGLESTEP_PCREL
;
208 *flags
|= SINGLESTEP_BRANCH
;
209 } else if (opc
== 0xfc) {
211 READ_WORD32(sp
, &x32
);
213 *flags
&= ~SINGLESTEP_PCREL
;
214 *flags
|= SINGLESTEP_BRANCH
;
215 } else if (opc
== 0xfd) {
217 READ_WORD32(sp
+ 4, &x32
);
219 *flags
&= ~SINGLESTEP_PCREL
;
220 *flags
|= SINGLESTEP_BRANCH
;
224 /* potential 3-byte conditional branches */
228 if (opc
>= 0xe8 && opc
<= 0xeb &&
229 (cond_table
[regs
->epsw
& 0xf] &
230 (1 << ((opc
& 0xf) + 3)))
232 READ_BYTE(pc
+2, &x8
);
234 *flags
|= SINGLESTEP_BRANCH
;
241 READ_WORD16(pc
+ 2, &x16
);
245 *flags
|= SINGLESTEP_BRANCH
;
252 READ_WORD32(pc
+ 2, &x32
);
255 *flags
|= SINGLESTEP_BRANCH
;
258 /* SETLB - loads the next four bytes into the LIR reg */
261 panic("Can't singlestep Lxx/SETLB\n");
265 return (unsigned)next
;
270 * set up out of place singlestep of some branching instructions
272 static unsigned __kprobes
singlestep_branch_setup(struct pt_regs
*regs
)
274 u8 opc
, *pc
, *sp
, *next
;
277 pc
= (u8
*) regs
->pc
;
278 sp
= (u8
*) (regs
+ 1);
281 case 0xc0 ... 0xca: /* Bxx (d8,PC) */
282 case 0xcc: /* JMP (d16,PC) */
283 case 0xdc: /* JMP (d32,PC) */
284 case 0xf8: /* Bxx (d8,PC) 3-byte version */
285 /* don't really need to do anything except cause trap */
289 case 0xcd: /* CALL (d16,PC) */
295 case 0xdd: /* CALL (d32,PC) */
303 case 0xde: /* RETF */
305 regs
->mdr
= (unsigned) next
;
311 *(unsigned *)sp
= (unsigned) next
;
317 if (opc
>= 0xf0 && opc
<= 0xf3) {
319 /* use CALLS (d16,PC) to avoid mucking with An */
325 } else if (opc
>= 0xf4 && opc
<= 0xf7) {
328 } else if (opc
== 0xfc) {
331 *(unsigned *) sp
= (unsigned) next
;
332 } else if (opc
== 0xfd) {
335 *(unsigned *)(sp
+ 4) = (unsigned) next
;
339 case 0xfa: /* CALLS (d16,PC) */
345 case 0xfc: /* CALLS (d32,PC) */
353 case 0xd0 ... 0xda: /* LXX (d8,PC) */
354 case 0xdb: /* SETLB */
355 panic("Can't singlestep Lxx/SETLB\n");
358 return (unsigned) next
;
361 int __kprobes
arch_prepare_kprobe(struct kprobe
*p
)
366 void __kprobes
arch_copy_kprobe(struct kprobe
*p
)
368 memcpy(p
->ainsn
.insn
, p
->addr
, MAX_INSN_SIZE
);
371 void __kprobes
arch_arm_kprobe(struct kprobe
*p
)
373 *p
->addr
= BREAKPOINT_INSTRUCTION
;
374 flush_icache_range((unsigned long) p
->addr
,
375 (unsigned long) p
->addr
+ sizeof(kprobe_opcode_t
));
378 void __kprobes
arch_disarm_kprobe(struct kprobe
*p
)
380 mn10300_dcache_flush();
381 mn10300_icache_inv();
/* No per-probe resources were allocated, so nothing to release. */
void arch_remove_kprobe(struct kprobe *p)
{
}
389 void __kprobes
disarm_kprobe(struct kprobe
*p
, struct pt_regs
*regs
)
391 *p
->addr
= p
->opcode
;
392 regs
->pc
= (unsigned long) p
->addr
;
393 mn10300_dcache_flush();
394 mn10300_icache_inv();
398 void __kprobes
prepare_singlestep(struct kprobe
*p
, struct pt_regs
*regs
)
400 unsigned long nextpc
;
402 current_kprobe_orig_pc
= regs
->pc
;
403 memcpy(current_kprobe_ss_buf
, &p
->ainsn
.insn
[0], MAX_INSN_SIZE
);
404 regs
->pc
= (unsigned long) current_kprobe_ss_buf
;
406 nextpc
= find_nextpc(regs
, ¤t_kprobe_ss_flags
);
407 if (current_kprobe_ss_flags
& SINGLESTEP_PCREL
)
408 current_kprobe_next_pc
=
409 current_kprobe_orig_pc
+ (nextpc
- regs
->pc
);
411 current_kprobe_next_pc
= nextpc
;
413 /* branching instructions need special handling */
414 if (current_kprobe_ss_flags
& SINGLESTEP_BRANCH
)
415 nextpc
= singlestep_branch_setup(regs
);
417 current_kprobe_bp_addr
= nextpc
;
419 *(u8
*) nextpc
= BREAKPOINT_INSTRUCTION
;
420 mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf
,
421 sizeof(current_kprobe_ss_buf
));
422 mn10300_icache_inv();
425 static inline int __kprobes
kprobe_handler(struct pt_regs
*regs
)
429 unsigned int *addr
= (unsigned int *) regs
->pc
;
431 /* We're in an interrupt, but this is clear and BUG()-safe. */
434 /* Check we're not actually recursing */
435 if (kprobe_running()) {
436 /* We *are* holding lock here, so this is safe.
437 Disarm the probe we just hit, and ignore it. */
438 p
= get_kprobe(addr
);
440 disarm_kprobe(p
, regs
);
444 if (p
->break_handler
&& p
->break_handler(p
, regs
))
447 /* If it's not ours, can't be delete race, (we hold lock). */
451 p
= get_kprobe(addr
);
453 if (*addr
!= BREAKPOINT_INSTRUCTION
) {
454 /* The breakpoint instruction was removed right after
455 * we hit it. Another cpu has removed either a
456 * probepoint or a debugger breakpoint at this address.
457 * In either case, no further handling of this
458 * interrupt is appropriate.
462 /* Not one of ours: let kernel handle it */
466 kprobe_status
= KPROBE_HIT_ACTIVE
;
468 if (p
->pre_handler(p
, regs
)) {
469 /* handler has already set things up, so skip ss setup */
474 prepare_singlestep(p
, regs
);
475 kprobe_status
= KPROBE_HIT_SS
;
479 preempt_enable_no_resched();
484 * Called after single-stepping. p->addr is the address of the
485 * instruction whose first byte has been replaced by the "breakpoint"
486 * instruction. To avoid the SMP problems that can occur when we
487 * temporarily put back the original opcode to single-step, we
488 * single-stepped a copy of the instruction. The address of this
489 * copy is p->ainsn.insn.
491 static void __kprobes
resume_execution(struct kprobe
*p
, struct pt_regs
*regs
)
493 /* we may need to fixup regs/stack after singlestepping a call insn */
494 if (current_kprobe_ss_flags
& SINGLESTEP_BRANCH
) {
495 regs
->pc
= current_kprobe_orig_pc
;
496 switch (p
->ainsn
.insn
[0]) {
497 case 0xcd: /* CALL (d16,PC) */
498 *(unsigned *) regs
->sp
= regs
->mdr
= regs
->pc
+ 5;
500 case 0xdd: /* CALL (d32,PC) */
501 /* fixup mdr and return address on stack */
502 *(unsigned *) regs
->sp
= regs
->mdr
= regs
->pc
+ 7;
505 if (p
->ainsn
.insn
[1] >= 0xf0 &&
506 p
->ainsn
.insn
[1] <= 0xf3) {
508 /* fixup MDR and return address on stack */
509 regs
->mdr
= regs
->pc
+ 2;
510 *(unsigned *) regs
->sp
= regs
->mdr
;
514 case 0xfa: /* CALLS (d16,PC) */
515 /* fixup MDR and return address on stack */
516 *(unsigned *) regs
->sp
= regs
->mdr
= regs
->pc
+ 4;
519 case 0xfc: /* CALLS (d32,PC) */
520 /* fixup MDR and return address on stack */
521 *(unsigned *) regs
->sp
= regs
->mdr
= regs
->pc
+ 6;
526 regs
->pc
= current_kprobe_next_pc
;
527 current_kprobe_bp_addr
= 0;
530 static inline int __kprobes
post_kprobe_handler(struct pt_regs
*regs
)
532 if (!kprobe_running())
535 if (current_kprobe
->post_handler
)
536 current_kprobe
->post_handler(current_kprobe
, regs
, 0);
538 resume_execution(current_kprobe
, regs
);
539 reset_current_kprobe();
540 preempt_enable_no_resched();
544 /* Interrupts disabled, kprobe_lock held. */
546 int __kprobes
kprobe_fault_handler(struct pt_regs
*regs
, int trapnr
)
548 if (current_kprobe
->fault_handler
&&
549 current_kprobe
->fault_handler(current_kprobe
, regs
, trapnr
))
552 if (kprobe_status
& KPROBE_HIT_SS
) {
553 resume_execution(current_kprobe
, regs
);
554 reset_current_kprobe();
555 preempt_enable_no_resched();
561 * Wrapper routine to for handling exceptions.
563 int __kprobes
kprobe_exceptions_notify(struct notifier_block
*self
,
564 unsigned long val
, void *data
)
566 struct die_args
*args
= data
;
570 if (current_kprobe_bp_addr
!= args
->regs
->pc
) {
571 if (kprobe_handler(args
->regs
))
574 if (post_kprobe_handler(args
->regs
))
579 if (kprobe_running() &&
580 kprobe_fault_handler(args
->regs
, args
->trapnr
))
589 /* Jprobes support. */
590 static struct pt_regs jprobe_saved_regs
;
591 static struct pt_regs
*jprobe_saved_regs_location
;
592 static kprobe_opcode_t jprobe_saved_stack
[MAX_STACK_SIZE
];
594 int __kprobes
setjmp_pre_handler(struct kprobe
*p
, struct pt_regs
*regs
)
596 struct jprobe
*jp
= container_of(p
, struct jprobe
, kp
);
598 jprobe_saved_regs_location
= regs
;
599 memcpy(&jprobe_saved_regs
, regs
, sizeof(struct pt_regs
));
601 /* Save a whole stack frame, this gets arguments
602 * pushed onto the stack after using up all the
605 memcpy(&jprobe_saved_stack
, regs
+ 1, sizeof(jprobe_saved_stack
));
607 /* setup return addr to the jprobe handler routine */
608 regs
->pc
= (unsigned long) jp
->entry
;
612 void __kprobes
jprobe_return(void)
614 void *orig_sp
= jprobe_saved_regs_location
+ 1;
616 preempt_enable_no_resched();
617 asm volatile(" mov %0,sp\n"
618 ".globl jprobe_return_bp_addr\n"
619 "jprobe_return_bp_addr:\n\t"
624 extern void jprobe_return_bp_addr(void);
626 int __kprobes
longjmp_break_handler(struct kprobe
*p
, struct pt_regs
*regs
)
628 u8
*addr
= (u8
*) regs
->pc
;
630 if (addr
== (u8
*) jprobe_return_bp_addr
) {
631 if (jprobe_saved_regs_location
!= regs
) {
632 printk(KERN_ERR
"JPROBE:"
633 " Current regs (%p) does not match saved regs"
635 regs
, jprobe_saved_regs_location
);
639 /* Restore old register state.
641 memcpy(regs
, &jprobe_saved_regs
, sizeof(struct pt_regs
));
643 memcpy(regs
+ 1, &jprobe_saved_stack
,
644 sizeof(jprobe_saved_stack
));
650 int __init
arch_init_kprobes(void)