/*
 *  Kernel Probes (KProbes)
 *  arch/ppc64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for ppc64.
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>
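/*
 * How a probe fires on ppc64, in brief: arch_arm_kprobe() patches the
 * probed instruction with a trap. Hitting it raises a program check
 * exception, which reaches kprobe_handler() via the die notifier chain.
 * The pre_handler runs, then the displaced instruction is single-stepped
 * from its out-of-line copy, and post_kprobe_handler() finishes up.
 */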
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
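/*
 * The per-CPU state above tracks the probe currently being handled on
 * this CPU; kprobe_ctlblk additionally holds the saved MSR and a
 * one-deep stash of the previous probe for the reentrant case.
 */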
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
		printk("Cannot register a kprobe on rfid or mtmsrd\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret)
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	return ret;
}
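/*
 * Usage sketch (illustrative, not part of this file): a client sets up
 * a struct kprobe and hands it to the generic register_kprobe(), which
 * calls back into arch_prepare_kprobe()/arch_arm_kprobe() here. The
 * handler and address names below are hypothetical.
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("hit at %p, nip=0x%lx\n", p->addr, regs->nip);
 *		return 0;	// let the normal single-step path run
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.addr = (kprobe_opcode_t *)some_probed_function,
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	register_kprobe(&my_kp);
 */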
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
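/*
 * Arming and disarming both patch kernel text, so the icache range
 * covering the probed instruction is flushed to make sure no CPU keeps
 * executing a stale opcode out of its instruction cache.
 */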
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	down(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	up(&kprobe_mutex);
}
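/*
 * Set MSR[SE] so the next instruction raises a single-step exception.
 * A trap-variant instruction is stepped "inline" at its original
 * address, since is_trap() insns re-enter kprobe_handler() anyway;
 * everything else is stepped from the out-of-line copy.
 */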
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = *p->ainsn.insn;

	regs->msr |= MSR_SE;

	/* single step inline if it is a trap variant */
	if (is_trap(insn))
		regs->nip = (unsigned long)p->addr;
	else
		regs->nip = (unsigned long)p->ainsn.insn;
}
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}
static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}
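/*
 * On ppc64 the return address of the probed function sits in the link
 * register, so a return probe only has to save regs->link and redirect
 * it at the trampoline below.
 */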
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->link;

		/* Replace the return addr with trampoline addr */
		regs->link = (unsigned long)kretprobe_trampoline;
		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
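/*
 * Main breakpoint handler, run with preemption disabled for its whole
 * duration. It must distinguish our breakpoints from other trap
 * variants, cope with probes being removed by another CPU under us,
 * and handle reentry when a second probe is hit inside a handler.
 */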
static inline int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
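/*
 * Note that nip-relative instructions (branches and the like) computed
 * their result against the copy's address during the step, so they are
 * re-run through emulate_step() with nip reset to the real probe
 * address; for anything else emulate_step() returns 0 and nip simply
 * advances past the probed instruction.
 */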
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs);
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
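/*
 * Entry point from the die notifier chain: DIE_BPT is a breakpoint
 * trap, DIE_SSTEP the completion of a single step, and DIE_PAGE_FAULT
 * a fault taken while a probe handler was running.
 */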
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
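/*
 * The ppc64 ABI reaches functions through descriptors: jp->entry points
 * at a func_descr_t holding the real entry address and its TOC pointer,
 * which is why both nip and gpr[2] (r2, the TOC register) are set below.
 */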
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);

	return 1;
}
void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
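/*
 * The trap above re-enters kprobe_handler(), whose break_handler path
 * lands in longjmp_break_handler() below to restore the register state
 * saved by setjmp_pre_handler().
 */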
void __kprobes jprobe_return_end(void)
{
};
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}