/* MN10300 Kernel probes implementation
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public Licence for more details.
 *
 * You should have received a copy of the GNU General Public Licence
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
struct kretprobe_blackpoint kretprobe_blacklist[] = { { NULL, NULL } };
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001
#define KPROBE_HIT_SS		0x00000002
static struct kprobe *cur_kprobe;
static unsigned long cur_kprobe_orig_pc;
static unsigned long cur_kprobe_next_pc;
static int cur_kprobe_ss_flags;
static unsigned long kprobe_status;
static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
static unsigned long cur_kprobe_bp_addr;

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* singlestep flag bits */
#define SINGLESTEP_BRANCH 1
#define SINGLESTEP_PCREL 2
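
/* SINGLESTEP_PCREL means the next-PC value computed while stepping out of
 * line is relative to the single-step buffer and must be rebased onto the
 * original PC; SINGLESTEP_BRANCH means the instruction may transfer control
 * and needs the fix-ups in singlestep_branch_setup()/resume_execution().
 */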
#define READ_BYTE(p, valp) \
	do { *(u8 *)(valp) = *(u8 *)(p); } while (0)

#define READ_WORD16(p, valp) \
	do { \
		READ_BYTE((p), (valp)); \
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1); \
	} while (0)

#define READ_WORD32(p, valp) \
	do { \
		READ_BYTE((p), (valp)); \
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1); \
		READ_BYTE((u8 *)(p) + 2, (u8 *)(valp) + 2); \
		READ_BYTE((u8 *)(p) + 3, (u8 *)(valp) + 3); \
	} while (0)
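
/* Instruction operands sit at arbitrary byte offsets in the instruction
 * stream, so multi-byte immediates are assembled a byte at a time rather
 * than read with a potentially unaligned 16/32-bit load.
 */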
/* The size of each instruction in bytes, indexed by its first opcode byte */
static const u8 mn10300_insn_sizes[256] =
{
	/* 0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
	1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3,	/* 0 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 1 */
	2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3,	/* 2 */
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1,	/* 3 */
	1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2,	/* 4 */
	1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,	/* 5 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 6 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 7 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* 8 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* 9 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* a */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* b */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2,	/* c */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* d */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* e */
	0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1		/* f */
};
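
/* A size of zero flags opcodes that branch, return or begin a multi-byte
 * encoding; those cannot be stepped past by simple length counting and are
 * decoded individually in find_nextpc() and singlestep_branch_setup().
 */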
/* Conditional branch condition bits.  The low-nibble conditions match the
 * Bxx (d8,PC) opcode encoding (0xc0 + condition); VC/VS/NC/NS are tested
 * only by the three-byte 0xf8 forms (see find_nextpc()).
 */
#define LT (1 << 0)
#define GT (1 << 1)
#define GE (1 << 2)
#define LE (1 << 3)
#define CS (1 << 4)
#define HI (1 << 5)
#define CC (1 << 6)
#define LS (1 << 7)
#define EQ (1 << 8)
#define NE (1 << 9)
#define RA (1 << 10)
#define VC (1 << 11)
#define VS (1 << 12)
#define NC (1 << 13)
#define NS (1 << 14)

/* For each of the 16 possible states of the V, C, N and Z flags in the low
 * nibble of EPSW, the set of branch conditions that hold true.
 */
static const u16 cond_table[] = {
	/*  V  C  N  Z */
	/*  0  0  0  0 */ (NE | NC | CC | VC | GE | GT | HI),
	/*  0  0  0  1 */ (EQ | NC | CC | VC | GE | LE | LS),
	/*  0  0  1  0 */ (NE | NS | CC | VC | LT | LE | HI),
	/*  0  0  1  1 */ (EQ | NS | CC | VC | LT | LE | LS),
	/*  0  1  0  0 */ (NE | NC | CS | VC | GE | GT | LS),
	/*  0  1  0  1 */ (EQ | NC | CS | VC | GE | LE | LS),
	/*  0  1  1  0 */ (NE | NS | CS | VC | LT | LE | LS),
	/*  0  1  1  1 */ (EQ | NS | CS | VC | LT | LE | LS),
	/*  1  0  0  0 */ (NE | NC | CC | VS | LT | LE | HI),
	/*  1  0  0  1 */ (EQ | NC | CC | VS | LT | LE | LS),
	/*  1  0  1  0 */ (NE | NS | CC | VS | GE | GT | HI),
	/*  1  0  1  1 */ (EQ | NS | CC | VS | GE | LE | LS),
	/*  1  1  0  0 */ (NE | NC | CS | VS | LT | LE | LS),
	/*  1  1  0  1 */ (EQ | NC | CS | VS | LT | LE | LS),
	/*  1  1  1  0 */ (NE | NS | CS | VS | GE | GT | LS),
	/*  1  1  1  1 */ (EQ | NS | CS | VS | GE | LE | LS),
};
/*
 * Calculate what the PC will be after executing next instruction
 */
static unsigned find_nextpc(struct pt_regs *regs, int *flags)
{
	unsigned size;
	s8 x8;
	s16 x16;
	s32 x32;
	u8 opc, *pc, *sp, *next;

	next = 0;
	*flags = SINGLESTEP_PCREL;

	pc = (u8 *) regs->pc;
	sp = (u8 *) (regs + 1);
	opc = *pc;

	size = mn10300_insn_sizes[opc];
	if (size > 0) {
		next = pc + size;
	} else {
		switch (opc) {
			/* Bxx (d8,PC) */
		case 0xc0 ... 0xca:
			x8 = 2;
			if (cond_table[regs->epsw & 0xf] & (1 << (opc & 0xf)))
				x8 = (s8)pc[1];
			next = pc + x8;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* JMP (d16,PC) or CALL (d16,PC) */
		case 0xcc:
		case 0xcd:
			READ_WORD16(pc + 1, &x16);
			next = pc + x16;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* JMP (d32,PC) or CALL (d32,PC) */
		case 0xdc:
		case 0xdd:
			READ_WORD32(pc + 1, &x32);
			next = pc + x32;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* RETF */
		case 0xde:
			next = (u8 *)regs->mdr;
			*flags &= ~SINGLESTEP_PCREL;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* RET */
		case 0xdf:
			READ_WORD32(sp, &x32);
			next = (u8 *)x32;
			*flags &= ~SINGLESTEP_PCREL;
			*flags |= SINGLESTEP_BRANCH;
			break;

		case 0xf0:
			next = pc + 2;
			opc = pc[1];
			if (opc >= 0xf0 && opc <= 0xf7) {
				/* JMP (An) / CALLS (An) */
				switch (opc & 3) {
				case 0:
					next = (u8 *)regs->a0;
					break;
				case 1:
					next = (u8 *)regs->a1;
					break;
				case 2:
					next = (u8 *)regs->a2;
					break;
				case 3:
					next = (u8 *)regs->a3;
					break;
				}
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			} else if (opc == 0xfc) {
				/* RETS */
				READ_WORD32(sp, &x32);
				next = (u8 *)x32;
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			} else if (opc == 0xfd) {
				/* RTI */
				READ_WORD32(sp + 4, &x32);
				next = (u8 *)x32;
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			}
			break;

			/* potential 3-byte conditional branches */
		case 0xf8:
			next = pc + 3;
			opc = pc[1];
			if (opc >= 0xe8 && opc <= 0xeb &&
			    (cond_table[regs->epsw & 0xf] &
			     (1 << ((opc & 0xf) + 3)))
			    ) {
				READ_BYTE(pc + 2, &x8);
				next = pc + x8;
				*flags |= SINGLESTEP_BRANCH;
			}
			break;

		case 0xfa:
			if (pc[1] == 0xff) {
				/* CALLS (d16,PC) */
				READ_WORD16(pc + 2, &x16);
				next = pc + x16;
			} else
				next = pc + 4;
			*flags |= SINGLESTEP_BRANCH;
			break;

		case 0xfc:
			x32 = 6;
			if (pc[1] == 0xff) {
				/* CALLS (d32,PC) */
				READ_WORD32(pc + 2, &x32);
			}
			next = pc + x32;
			*flags |= SINGLESTEP_BRANCH;
			break;

			/* SETLB - loads the next four bytes into the LIR reg */
		case 0xd0 ... 0xda:	/* LXX (d8,PC) */
		case 0xdb:		/* SETLB */
			panic("Can't singlestep Lxx/SETLB\n");
			break;
		}
	}

	return (unsigned)next;
}
/*
 * set up out of place singlestep of some branching instructions
 */
static unsigned __kprobes singlestep_branch_setup(struct pt_regs *regs)
{
	u8 opc, *pc, *sp, *next;

	next = NULL;
	pc = (u8 *) regs->pc;
	sp = (u8 *) (regs + 1);

	switch (pc[0]) {
	case 0xc0 ... 0xca:	/* Bxx (d8,PC) */
	case 0xcc:		/* JMP (d16,PC) */
	case 0xdc:		/* JMP (d32,PC) */
	case 0xf8:		/* Bxx (d8,PC) 3-byte version */
		/* don't really need to do anything except cause trap */
		next = pc;
		break;

	case 0xcd:		/* CALL (d16,PC) */
		pc[1] = 5;
		pc[2] = 0;
		next = pc + 5;
		break;

	case 0xdd:		/* CALL (d32,PC) */
		pc[1] = 7;
		pc[2] = 0;
		pc[3] = 0;
		pc[4] = 0;
		next = pc + 7;
		break;

	case 0xde:		/* RETF */
		next = pc + 3;
		regs->mdr = (unsigned) next;
		break;

	case 0xdf:		/* RET */
		sp += pc[2];
		next = pc + 3;
		*(unsigned *)sp = (unsigned) next;
		break;

	case 0xf0:
		next = pc + 2;
		opc = pc[1];
		if (opc >= 0xf0 && opc <= 0xf3) {
			/* CALLS (An) */
			/* use CALLS (d16,PC) to avoid mucking with An */
			pc[0] = 0xfa;
			pc[1] = 0xff;
			pc[2] = 4;
			pc[3] = 0;
			next = pc + 4;
		} else if (opc >= 0xf4 && opc <= 0xf7) {
			/* JMP (An) */
			next = pc + 2;
		} else if (opc == 0xfc) {
			/* RETS */
			next = pc + 2;
			*(unsigned *) sp = (unsigned) next;
		} else if (opc == 0xfd) {
			/* RTI */
			next = pc + 2;
			*(unsigned *)(sp + 4) = (unsigned) next;
		}
		break;

	case 0xfa:		/* CALLS (d16,PC) */
		pc[2] = 4;
		pc[3] = 0;
		next = pc + 4;
		break;

	case 0xfc:		/* CALLS (d32,PC) */
		pc[2] = 6;
		pc[3] = 0;
		pc[4] = 0;
		pc[5] = 0;
		next = pc + 6;
		break;

	case 0xd0 ... 0xda:	/* LXX (d8,PC) */
	case 0xdb:		/* SETLB */
		panic("Can't singlestep Lxx/SETLB\n");
		break;
	}

	return (unsigned) next;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	/* restore the original first opcode byte before flushing, otherwise
	 * the breakpoint would be left armed in the kernel text */
	*p->addr = p->opcode;
	mn10300_dcache_flush();
	mn10300_icache_inv();
}
void arch_remove_kprobe(struct kprobe *p)
{
}
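
/* disarm_kprobe() handles the recursion case in kprobe_handler(): it strips
 * the breakpoint byte out of the kernel text and rewinds the PC so that the
 * original instruction is re-executed in place.
 */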
static inline
void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	*p->addr = p->opcode;
	regs->pc = (unsigned long) p->addr;
	mn10300_dcache_flush();
	mn10300_icache_inv();
}
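
/* Set up an out-of-line single step: copy the probed instruction into
 * cur_kprobe_ss_buf, point the PC at the copy, and plant a breakpoint
 * immediately after the spot where execution will stop.
 */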
static inline
void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long nextpc;

	cur_kprobe_orig_pc = regs->pc;
	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
	regs->pc = (unsigned long) cur_kprobe_ss_buf;

	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
	else
		cur_kprobe_next_pc = nextpc;

	/* branching instructions need special handling */
	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
		nextpc = singlestep_branch_setup(regs);

	cur_kprobe_bp_addr = nextpc;

	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
				    sizeof(cur_kprobe_ss_buf));
	mn10300_icache_inv();
}
static inline int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *) regs->pc;

	/* We're in an interrupt, but this is clear and BUG()-safe. */
	preempt_disable();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			p = cur_kprobe;
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/* The breakpoint instruction was removed right after
			 * we hit it.  Another cpu has removed either a
			 * probepoint or a debugger breakpoint at this address.
			 * In either case, no further handling of this
			 * interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	cur_kprobe = p;
	if (p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	/* we may need to fixup regs/stack after singlestepping a call insn */
	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
		regs->pc = cur_kprobe_orig_pc;
		switch (p->ainsn.insn[0]) {
		case 0xcd:	/* CALL (d16,PC) */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
			break;

		case 0xdd:	/* CALL (d32,PC) */
			/* fixup mdr and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 7;
			break;

		case 0xf0:
			if (p->ainsn.insn[1] >= 0xf0 &&
			    p->ainsn.insn[1] <= 0xf3) {
				/* CALLS (An) */
				/* fixup MDR and return address on stack */
				regs->mdr = regs->pc + 2;
				*(unsigned *) regs->sp = regs->mdr;
			}
			break;

		case 0xfa:	/* CALLS (d16,PC) */
			/* fixup MDR and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 4;
			break;

		case 0xfc:	/* CALLS (d32,PC) */
			/* fixup MDR and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 6;
			break;
		}
	}

	regs->pc = cur_kprobe_next_pc;
	cur_kprobe_bp_addr = 0;
}
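
/* Runs when the post-single-step breakpoint planted at cur_kprobe_bp_addr
 * is hit: invoke any post_handler, then retire the single step and rejoin
 * the normal instruction stream at cur_kprobe_next_pc.
 */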
static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	if (cur_kprobe->post_handler)
		cur_kprobe->post_handler(cur_kprobe, regs, 0);

	resume_execution(cur_kprobe, regs);
	reset_current_kprobe();
	preempt_enable_no_resched();
	return 1;
}
/* Interrupts disabled, kprobe_lock held. */
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (cur_kprobe->fault_handler &&
	    cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur_kprobe, regs);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;

	switch (val) {
	case DIE_BREAKPOINT:
		if (cur_kprobe_bp_addr != args->regs->pc) {
			if (kprobe_handler(args->regs))
				return NOTIFY_STOP;
		} else {
			if (post_kprobe_handler(args->regs))
				return NOTIFY_STOP;
		}
		break;

	case DIE_GPF:
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			return NOTIFY_STOP;
		break;
	}
	return NOTIFY_DONE;
}
/* Jprobes support. */
static struct pt_regs jprobe_saved_regs;
static struct pt_regs *jprobe_saved_regs_location;
static kprobe_opcode_t jprobe_saved_stack[MAX_STACK_SIZE];
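
/* A jprobe works by saving the register state and the top of the stack at
 * the probe point, diverting the PC into the user-supplied handler, and
 * then restoring everything in longjmp_break_handler() once the handler
 * calls jprobe_return().
 */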
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	jprobe_saved_regs_location = regs;
	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* Save a whole stack frame; this catches the arguments that were
	 * pushed onto the stack after all the argument registers had been
	 * used up.
	 */
	memcpy(&jprobe_saved_stack, regs + 1, sizeof(jprobe_saved_stack));

	/* setup return addr to the jprobe handler routine */
	regs->pc = (unsigned long) jp->entry;
	return 1;
}
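
/* The jprobe handler returns here: restore the stack pointer saved by
 * setjmp_pre_handler() and execute a breakpoint, which lands us in
 * longjmp_break_handler() below.
 */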
void __kprobes jprobe_return(void)
{
	void *orig_sp = jprobe_saved_regs_location + 1;

	preempt_enable_no_resched();
	asm volatile("	mov	%0,sp\n"
		     ".globl jprobe_return_bp_addr\n"
		     "jprobe_return_bp_addr:\n\t"
		     "	.byte	0xff\n"	/* BREAKPOINT_INSTRUCTION */
		     : : "a" (orig_sp));	/* assumption: SP is loaded
						 * from an address register */
}

extern void jprobe_return_bp_addr(void);
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	u8 *addr = (u8 *) regs->pc;

	if (addr == (u8 *) jprobe_return_bp_addr) {
		if (jprobe_saved_regs_location != regs) {
			printk(KERN_ERR "JPROBE:"
			       " Current regs (%p) does not match saved regs"
			       " (%p).\n",
			       regs, jprobe_saved_regs_location);
			BUG();
		}

		/* Restore old register state.
		 */
		memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));

		memcpy(regs + 1, &jprobe_saved_stack,
		       sizeof(jprobe_saved_stack));
		return 1;
	}
	return 0;
}
int __init arch_init_kprobes(void)
{
	return 0;
}
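
/* Illustrative use only (not part of this file): a module attaches a probe
 * through the generic kprobes API, which ends up in the arch hooks above.
 * The handler and symbol names below are hypothetical.
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", kp->addr);
 *		return 0;	// fall through to the single step
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);	// arms via arch_arm_kprobe()
 *	unregister_kprobe(&my_kp);	// disarms via arch_disarm_kprobe()
 */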