/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>
/* Long is fine, even if it is only 4 bytes ;-) */
static unsigned long *ftrace_nop;
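
/*
 * The union below gives two views of the same MCOUNT_INSN_SIZE bytes:
 * the anonymous struct is used to compose a relative call (one opcode
 * byte plus a 32-bit signed displacement), while code[] reads those
 * same bytes back as a plain buffer to copy over the call site.
 */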
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}
unsigned char *ftrace_nop_replace(void)
{
	return (char *)ftrace_nop;
}
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	/* 0xe8 is the opcode of a relative near call */
	calc.e8		= 0xe8;
	/* the rel32 offset is relative to the instruction after the call */
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
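
/*
 * Illustrative example (values are made up): with MCOUNT_INSN_SIZE == 5,
 * a call at ip = 0xc0100000 to addr = 0xc0200000 gets the displacement
 * 0xc0200000 - (0xc0100000 + 5) = 0x000ffffb, so the bytes emitted are
 * e8 fb ff 0f 00 -- the standard x86 rel32 call encoding.
 */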
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as against the code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	/* make sure the CPU re-fetches the modified instructions */
	sync_core();

	return 0;
}
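
/*
 * Illustrative note (not part of the original logic): at boot every
 * mcount call site is a 5-byte relative call (e8 xx xx xx xx).
 * ftrace_modify_code() overwrites it with the 5-byte nop chosen in
 * ftrace_dyn_arch_init() below (e.g. 0f 1f 44 00 00), and the call is
 * patched back in when tracing is enabled.
 */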
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	/* ftrace_call is the patched call site inside the ftrace trampoline */
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails,
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop,
	 * since we would then risk being preempted in the middle
	 * of that nop, and if tracing were enabled at that point,
	 * it might cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
128 "jmp ftrace_test_jmp\n"
129 /* This code needs to stay around */
130 ".section .text, \"ax\"\n"
132 "jmp ftrace_test_p6nop\n"
135 "nop\n" /* 2 byte jmp + 3 bytes */
140 ".byte 0x66,0x66,0x66,0x66,0x90\n"
144 ".section .fixup, \"ax\"\n"
146 " jmp ftrace_test_nop5\n"
150 _ASM_EXTABLE(ftrace_test_p6nop
, 2b
)
151 _ASM_EXTABLE(ftrace_test_nop5
, 3b
)
152 : "=r"(faulted
) : "0" (faulted
));
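
	/*
	 * How the probe above resolves: a fault while executing the P6
	 * nop is redirected by the first exception table entry to fixup
	 * 2:, which sets faulted to 1 and retries with the 66 66 66 66 90
	 * nop; a fault on that one hits fixup 3:, which sets faulted to 2,
	 * leaving only the jmp. The switch below selects ftrace_nop
	 * accordingly.
	 */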
	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		ftrace_nop = (unsigned long *)ftrace_test_nop5;
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		ftrace_nop = (unsigned long *)ftrace_test_jmp;
		break;
	}
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}