/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;		/* 0xe8: the "call" opcode */
		int offset;		/* rel32 displacement that follows it */
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;		/* call opcode */
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
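
#if 0	/* Stand-alone illustration, not part of the kernel build.
	 *
	 * A stripped-down user-space model of the flag-and-buffer handshake
	 * described above, with C11 seq_cst atomics standing in for the
	 * kernel's smp_mb()/smp_wmb(). The real code additionally counts
	 * in-flight NMIs so the modifier can wait for them (steps 3 and 6);
	 * that is elided here. All names are illustrative.
	 */
#include <stdatomic.h>
#include <string.h>

static char text[5];			/* stands in for the patched code */
static _Atomic int mod_flag;		/* "we are modifying code" */
static char *buf_ip;			/* the IP buffer */
static const char *buf_code;		/* the "code" buffer */

/* What an NMI handler does on entry: help finish any pending write. */
static void pretend_nmi_enter(void)
{
	if (atomic_load(&mod_flag))
		memcpy(buf_ip, buf_code, sizeof(text));
}

/* Steps 1-6 from the comment above, as run by the modifier. */
static void modifier(const char *new_code)
{
	buf_ip = text;				/* 1) publish the buffers */
	buf_code = new_code;
	atomic_store(&mod_flag, 1);		/* 2) set the flag */
						/* 3) wait for running NMIs */
	memcpy(buf_ip, buf_code, sizeof(text));	/* 4) write the code */
	atomic_store(&mod_flag, 0);		/* 5) clear the flag */
						/* 6) wait for running NMIs */
}

int main(void)
{
	modifier("\x0f\x1f\x44\x00\x00");	/* patch in a 5-byte nop */
	pretend_nmi_enter();			/* flag clear: does nothing */
	return 0;
}
#endif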

static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static int mod_code_write;		/* set when NMI should do the write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU process can be writing to mod_code_status.
	 *    (and the code itself)
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* Make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}
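
#if 0	/* Stand-alone illustration, not part of the kernel build.
	 *
	 * The same read/verify/write pattern in plain user-space C: read the
	 * current bytes back, refuse to patch unless they match the expected
	 * old code, then install the new code. patch_site() and INSN_SIZE
	 * are illustrative names, and plain memcpy() stands in for the
	 * fault-safe probe_kernel_read()/do_ftrace_mod_code().
	 */
#include <errno.h>
#include <string.h>

#define INSN_SIZE 5

static int patch_site(unsigned char *site, const unsigned char *old_code,
		      const unsigned char *new_code)
{
	unsigned char replaced[INSN_SIZE];

	memcpy(replaced, site, INSN_SIZE);	/* read the current text */

	if (memcmp(replaced, old_code, INSN_SIZE) != 0)
		return -EINVAL;			/* not what we expected */

	memcpy(site, new_code, INSN_SIZE);	/* install the new text */
	return 0;
}

int main(void)
{
	unsigned char text[INSN_SIZE]       = { 0xe8, 0, 0, 0, 0 };
	const unsigned char old5[INSN_SIZE] = { 0xe8, 0, 0, 0, 0 };
	const unsigned char nop5[INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

	return patch_site(text, old5, nop5);	/* 0 on success */
}
#endif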

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * go to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we can not use a multi part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n" /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
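
#if 0	/* Stand-alone illustration, not part of the kernel build.
	 *
	 * The three candidate 5-byte nops probed above, written out as byte
	 * patterns (the last is one plausible encoding of the jmp probe: a
	 * 2-byte short jmp over three 1-byte nops). Each occupies exactly
	 * the same space as the "call mcount" it replaces.
	 */
#include <stdio.h>

static const unsigned char p6_nop5[5]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
static const unsigned char osp_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
static const unsigned char jmp_nop5[5] = { 0xeb, 0x03, 0x90, 0x90, 0x90 };

int main(void)
{
	const unsigned char *nops[] = { p6_nop5, osp_nop5, jmp_nop5 };

	for (int n = 0; n < 3; n++) {
		for (int i = 0; i < 5; i++)
			printf("%02x ", nops[n][i]);
		printf("\n");
	}
	return 0;
}
#endif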

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}
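
#if 0	/* Stand-alone illustration, not part of the kernel build.
	 *
	 * Retargeting a rel32 jmp the way ftrace_mod_jmp() does: the operand
	 * is always measured from the end of the 5-byte instruction, so
	 * flipping the jmp between two targets only rewrites the last four
	 * bytes. mod_jmp() and the sample addresses are illustrative.
	 */
#include <string.h>

#define INSN_SIZE 5

static int mod_jmp(unsigned char *code, int old_offset, int new_offset)
{
	if (code[0] != 0xe9 || memcmp(&code[1], &old_offset, 4) != 0)
		return -1;			/* not the jmp we expected */

	memcpy(&code[1], &new_offset, 4);	/* retarget it */
	return 0;
}

int main(void)
{
	unsigned long ip = 0x1000, stub = 0x1005, caller = 0x3000;
	unsigned char code[INSN_SIZE] = { 0xe9, 0x00, 0x00, 0x00, 0x00 };

	/* currently jmp to stub (offset 0); point it at caller instead */
	return mod_jmp(code, (int)(stub - (ip + INSN_SIZE)),
		       (int)(caller - (ip + INSN_SIZE)));
}
#endif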

#else /* CONFIG_DYNAMIC_FTRACE */

/*
 * These functions are picked from those used on
 * this page for dynamic ftrace. They have been
 * simplified to ignore all traces in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
	atomic_dec(&in_nmi);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}
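
#if 0	/* Stand-alone illustration, not part of the kernel build.
	 *
	 * A free-standing model of the per-task shadow return stack used
	 * above: each entry remembers the real return address, the traced
	 * function and the entry timestamp, and a full stack is counted as
	 * an overrun rather than overwritten. All names are illustrative.
	 */
#include <errno.h>

#define RETFUNC_DEPTH 50		/* stand-in for FTRACE_RETFUNC_DEPTH */

struct ret_entry {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* traced function */
	unsigned long long calltime;	/* timestamp at entry */
};

static struct ret_entry ret_stack[RETFUNC_DEPTH];
static int curr_ret = -1;		/* -1 == empty */
static unsigned long overrun;

static int push_ret(unsigned long ret, unsigned long func,
		    unsigned long long time, int *depth)
{
	if (curr_ret == RETFUNC_DEPTH - 1) {
		overrun++;			/* full: count and refuse */
		return -EBUSY;
	}
	curr_ret++;
	ret_stack[curr_ret].ret = ret;
	ret_stack[curr_ret].func = func;
	ret_stack[curr_ret].calltime = time;
	*depth = curr_ret;
	return 0;
}

static unsigned long pop_ret(void)
{
	return ret_stack[curr_ret--].ret;	/* hand back the real address */
}

int main(void)
{
	int depth;

	push_ret(0x400123, 0x400456, 0, &depth);	/* function entry */
	return pop_ret() == 0x400123 ? 0 : 1;		/* function exit */
}
#endif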

/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	/* NMIs are currently unsupported */
	if (unlikely(atomic_read(&in_nmi)))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
		"   movl $0, %[faulted]\n"
		"4:\n"

		".section .fixup, \"ax\"\n"
		"3: movl $1, %[faulted]\n"
		"   jmp 4b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 3b)
		_ASM_EXTABLE(2b, 3b)

		: [parent_replaced] "=r" (parent), [old] "=r" (old),
		  [faulted] "=r" (faulted)
		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime,
			      self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
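
#if 0	/* Stand-alone illustration, not part of the kernel build.
	 *
	 * The essence of the hook above, modeled in user space with a
	 * function pointer playing the role of the saved return address on
	 * the stack: park the real value, divert the slot to a hooker, and
	 * let the hooker forward to the real target after tracing (as
	 * return_to_handler/ftrace_return_to_handler do). All names are
	 * illustrative.
	 */
#include <stdio.h>

static void (*ret_slot)(void);		/* the "saved return address" slot */
static void (*shadow_ret)(void);	/* where the real address is parked */

static void real_caller(void)
{
	printf("back in the real caller\n");
}

/* Stands in for return_to_handler: trace, then go to the real address. */
static void return_hooker(void)
{
	printf("return traced\n");
	shadow_ret();			/* pop_return_trace() equivalent */
}

int main(void)
{
	ret_slot = real_caller;

	/* prepare_ftrace_return(): park the old value, divert the slot */
	shadow_ret = ret_slot;
	ret_slot = return_hooker;

	ret_slot();			/* the "return" now runs via the hook */
	return 0;
}
#endif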

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */