ftrace, powerpc, sparc64, x86: remove notrace from arch ftrace file
arch/powerpc/kernel/ftrace.c

/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
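
/*
 * PowerPC no-op encoding: "ori r0, r0, 0" (primary opcode 24 with all
 * other fields zero).  The generic dynamic-ftrace code uses this to
 * overwrite "bl _mcount" call sites while they are not being traced.
 */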
static unsigned int ftrace_nop = 0x60000000;

#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/* PowerPC64 function symbols point to function descriptors, not code */
# define GET_ADDR(addr) *(unsigned long *)addr
#endif
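
/*
 * The first doubleword of a PPC64 function descriptor holds the real
 * entry address, so on 64-bit GET_ADDR(addr) dereferences the symbol
 * address to get the address the "bl" built below must branch to; on
 * 32-bit the symbol address already is the entry point.
 */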

static unsigned int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

unsigned char *ftrace_nop_replace(void)
{
	return (char *)&ftrace_nop;
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static unsigned int op;

	/*
	 * It would be nice to just use create_function_call, but that will
	 * update the code itself. Here we need to just return the
	 * instruction that is going to be modified, without modifying the
	 * code.
	 */
	addr = GET_ADDR(addr);

	/* Set to "bl addr" */
	op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return (unsigned char *)&op;
}
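
/*
 * Layout of the "bl" built above (PowerPC I-form branch):
 *	0x48000001 is primary opcode 18 with LK = 1, i.e. branch and
 *	record the return address in the link register;
 *	0x03fffffc masks the signed byte displacement (addr - ip), whose
 *	low two bits are always zero, into the LI field, so the target
 *	must lie within +/- 32MB of the patched call site.
 */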

#ifdef CONFIG_PPC64
# define _ASM_ALIGN	" .align 3 "
# define _ASM_PTR	" .llong "
#else
# define _ASM_ALIGN	" .align 2 "
# define _ASM_PTR	" .long "
#endif
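
/*
 * These are used to hand-roll the __ex_table entry below: the fault and
 * fixup addresses must be pointer sized, so 64-bit kernels emit 8-byte
 * (.llong) fields and 32-bit kernels 4-byte (.long) fields.
 */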

int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code;
	unsigned new = *(unsigned *)new_code;
	int faulted = 0;

	/*
	 * Note: due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as the
	 * code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
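	/*
	 * The sequence below: load the word at ip (%2) into replaced (%1),
	 * compare it with the expected old instruction (%5), and only if
	 * they match store the new instruction (%3) over it.  A fault on
	 * the load at label 1: is caught by the __ex_table entry and
	 * branches to the fixup at 3:, which sets faulted (%0) to 1.
	 */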
	asm volatile (
		"1: lwz		%1, 0(%2)\n"
		"   cmpw	%1, %5\n"
		"   bne		2f\n"
		"   stwu	%3, 0(%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3:	li %0, 1\n"
		"	b 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		_ASM_ALIGN "\n"
		_ASM_PTR "1b, 3b\n"
		".previous"
		: "=r"(faulted), "=r"(replaced)
		: "r"(ip), "r"(new),
		  "0"(faulted), "r"(old)
		: "memory");
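
	/*
	 * If the word read back matches neither the expected old
	 * instruction nor the new one, something else has modified the
	 * code under us; report that as a separate failure.
	 */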
	if (replaced != old && replaced != new)
		faulted = 2;

	if (!faulted)
		flush_icache_range(ip, ip + 8);

	return faulted;
}
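
/*
 * Redirect the patched call at the global ftrace_call site (a label in
 * this architecture's mcount/tracing entry assembly) so that it
 * branches to the tracer function that has just been registered.
 */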
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* This is running in kstop_machine */

	ftrace_mcount_set(data);

	return 0;
}