/*
 *  Kernel Probes (KProbes)
 *  arch/ppc64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>

static DECLARE_MUTEX(kprobe_mutex);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
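
/*
 * Validate the probe address (ppc64 instructions are word-aligned, and
 * mtmsrd/rfid rewrite the MSR, so they cannot safely be single-stepped
 * out of line), then grab a slot on the special executable instruction
 * page to hold the copy of the probed instruction.
 */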
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        int ret = 0;
        kprobe_opcode_t insn = *p->addr;

        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
        } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
                printk("Cannot register a kprobe on rfid or mtmsrd\n");
                ret = -EINVAL;
        }

        /* insn must be on a special executable page on ppc64 */
        if (!ret) {
                down(&kprobe_mutex);
                p->ainsn.insn = get_insn_slot();
                up(&kprobe_mutex);
                if (!p->ainsn.insn)
                        ret = -ENOMEM;
        }
        return ret;
}
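
/*
 * Copy the original instruction into the out-of-line slot and remember
 * its opcode so the probe can later be disarmed.
 */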
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        p->opcode = *p->addr;
}
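
/*
 * Arming patches the probed address with the breakpoint (trap)
 * instruction; disarming puts the saved opcode back. Both must flush
 * the icache so the modified text is visible to instruction fetch.
 */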
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        down(&kprobe_mutex);
        free_insn_slot(p->ainsn.insn);
        up(&kprobe_mutex);
}
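
/*
 * Turn on single-step (MSR_SE) and point nip at the instruction to
 * step: trap variants are stepped inline at the original address,
 * everything else is stepped from the out-of-line copy.
 */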
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        kprobe_opcode_t insn = *p->ainsn.insn;

        regs->msr |= MSR_SE;

        /* single step inline if it is a trap variant */
        if (is_trap(insn))
                regs->nip = (unsigned long)p->addr;
        else
                regs->nip = (unsigned long)p->ainsn.insn;
}
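
/*
 * Save/restore the active kprobe state so that a probe hit while
 * another is being handled (reentrancy) can be single-stepped and the
 * original probe's context then resumed.
 */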
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_saved_msr = regs->msr;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                      struct pt_regs *regs)
{
        struct kretprobe_instance *ri;

        if ((ri = get_free_rp_inst(rp)) != NULL) {
                ri->rp = rp;
                ri->task = current;
                ri->ret_addr = (kprobe_opcode_t *)regs->link;

                /* Replace the return addr with trampoline addr */
                regs->link = (unsigned long)kretprobe_trampoline;
                add_rp_inst(ri);
        } else {
                rp->nmissed++;
        }
}
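
/*
 * Entry point from the breakpoint trap (via the DIE_BPT notifier):
 * find the kprobe registered at regs->nip, handle recursion, run the
 * user pre_handler and set up the single-step of the probed instruction.
 */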
static inline int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned int *addr = (unsigned int *)regs->nip;
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        kprobe_opcode_t insn = *p->ainsn.insn;
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                                        is_trap(insn)) {
                                regs->msr &= ~MSR_SE;
                                regs->msr |= kcb->kprobe_saved_msr;
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobes variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kcb->kprobe_saved_msr = regs->msr;
                        p->nmissed++;
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                } else {
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * PowerPC has multiple variants of the "trap"
                         * instruction. If the current instruction is a
                         * trap variant, it could belong to someone else
                         */
                        kprobe_opcode_t cur_insn = *addr;
                        if (is_trap(cur_insn))
                                goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                        "kretprobe_trampoline:\n"
                        "nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(current);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *	- instances are always inserted at the head of the list
         *	- when multiple return probes are registered for the same
         *	  function, the first instance's ret_addr will point to the
         *	  real return address, and all the rest will point to
         *	  kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
        regs->nip = orig_ret_address;

        reset_current_kprobe();
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        preempt_enable_no_resched();

        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        int ret;
        unsigned int insn = *p->ainsn.insn;

        regs->nip = (unsigned long)p->addr;
        ret = emulate_step(regs, insn);
        if (ret == 0)
                regs->nip = (unsigned long)p->addr + 4;
}
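
/*
 * Invoked on the single-step trap (DIE_SSTEP) once the out-of-line copy
 * has executed: run the user post_handler, fix up nip to continue after
 * the original instruction and restore the saved MSR.
 */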
static inline int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs);
        regs->msr |= kcb->kprobe_saved_msr;

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * if somebody else is singlestepping across a probe point, msr
         * will have SE set, in which case, continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (regs->msr & MSR_SE)
                return 0;

        return 1;
}
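
/*
 * Invoked if a fault is taken while the probe is active. The user's
 * fault_handler gets the first chance; if the fault happened while
 * single-stepping, back out of the step and release the kprobe.
 */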
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                return 1;

        if (kcb->kprobe_status & KPROBE_HIT_SS) {
                resume_execution(cur, regs);
                regs->msr &= ~MSR_SE;
                regs->msr |= kcb->kprobe_saved_msr;

                reset_current_kprobe();
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}
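
/*
 * Jprobe entry: save the register state, then redirect execution to the
 * jprobe's handler. On ppc64 a function pointer is a function descriptor,
 * so both the entry address and the TOC pointer (r2) must be loaded
 * from the descriptor.
 */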
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* setup return addr to the jprobe handler routine */
        regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);

        return 1;
}
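
/*
 * The jprobe handler calls this to stop executing. The trap it raises
 * is fielded by longjmp_break_handler(), which restores the register
 * state saved in setjmp_pre_handler() above.
 */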
void __kprobes jprobe_return(void)
{
        asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        /*
         * FIXME - we should ideally be validating that we got here 'cos
         * of the "trap" in jprobe_return() above, before restoring the
         * saved regs...
         */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
        preempt_enable_no_resched();
        return 1;
}
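
/*
 * The kretprobe trampoline itself is covered by an ordinary kprobe,
 * registered at arch init time, whose pre_handler fires the kretprobe
 * handlers when a probed function returns.
 */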
static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}