arch/blackfin/kernel/ftrace.c
/*
 * ftrace graph code
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static const unsigned char mnop[] = {
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
};
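
/*
 * bfin_make_pcrel24() below builds the 4-byte Blackfin "CALL <pcrel24>"
 * instruction at @insn.  The branch offset is counted in 16-bit halfwords
 * (hence the >> 1); 0xe3 is the CALL opcode byte, and the bytes are laid
 * out for two little-endian 16-bit parcels: insn[1]:insn[0] carry the
 * opcode plus the top 8 offset bits, insn[3]:insn[2] the low 16 bits.
 */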
static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
			      unsigned long dst)
{
	uint32_t pcrel = (dst - src) >> 1;
	insn[0] = pcrel >> 16;
	insn[1] = 0xe3;
	insn[2] = pcrel;
	insn[3] = pcrel >> 8;
}
#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
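
/*
 * Patch @len bytes of @code over the instruction(s) at @ip.
 * probe_kernel_write() does a fault-safe copy into kernel text, and the
 * icache flush makes sure the core refetches the new opcodes.
 */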
static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
			      unsigned long len)
{
	int ret = probe_kernel_write((void *)ip, (void *)code, len);
	flush_icache_range(ip, ip + len);
	return ret;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Turn the mcount call site into two MNOPs as those are 32bit insns */
	return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
}
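
/*
 * The mcount call site being restored is 8 bytes:
 *   [--SP] = RETS;    (2 bytes)
 *   CALL <pcrel24>;   (4 bytes)
 *   RETS = [SP++];    (2 bytes)
 * RETS is saved and restored around the CALL because the CALL itself
 * overwrites it with its own return address.
 */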
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	/* Restore the mcount call site */
	unsigned char call[8];
	call[0] = 0x67;		/* [--SP] = RETS; */
	call[1] = 0x01;
	bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
	call[6] = 0x27;		/* RETS = [SP++]; */
	call[7] = 0x01;
	return ftrace_modify_code(rec->ip, call, sizeof(call));
}
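
/*
 * ftrace_call is the patchable CALL inside the mcount stub; re-point it
 * at whatever tracer function is currently registered.
 */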
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned char call[4];
	unsigned long ip = (unsigned long)&ftrace_call;
	bfin_make_pcrel24(call, ip, func);
	return ftrace_modify_code(ip, call, sizeof(call));
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* return value is done indirectly via data */
	*(unsigned long *)data = 0;

	return 0;
}

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

# ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
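
/*
 * ftrace_graph_call is another patchable site in the mcount stub.  To
 * enable the graph tracer it is overwritten with a 16-bit short jump to
 * ftrace_graph_caller: 0x2000 | pcrel12 encodes "JUMP.S <target>", with
 * the offset again counted in halfwords.
 */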
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)&ftrace_graph_call;
	uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
	jump_pcrel12 |= 0x2000;
	return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
}
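
/*
 * To disable, copy two bytes of zeros from empty_zero_page over the
 * short jump; 0x0000 is the Blackfin NOP, so the graph caller is
 * bypassed again.
 */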
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
}

# endif
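
/*
 * With the graph caller enabled, every traced function ends up here via
 * the mcount stub.  @parent points at the saved RETS (return address) of
 * the traced function; rewriting it with return_to_handler makes the
 * function "return" into the tracer, which records the exit and then
 * jumps back to the real caller.
 */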
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY)
		return;

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		return;
	}

	/* all is well in the world ! hijack RETS ... */
	*parent = return_hooker;
}

#endif