ftrace: Synchronize variable setting with breakpoints
[linux-2.6.git] / arch / x86 / kernel / ftrace.c
blob 2407a6d81cb7de3a8cb23ea718b8b4f979cddbf0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#ifdef CONFIG_DYNAMIC_FTRACE
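/*
 * ftrace_arch_code_modify_prepare() and ftrace_arch_code_modify_post_process()
 * bracket a batch of text updates: kernel and module text is made
 * writable for the duration of the modification and restored to
 * read-only afterwards.
 */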
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}
int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}
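/*
 * An mcount call site is a 5-byte x86 instruction: the 0xe8 (call rel32)
 * opcode followed by a 32-bit displacement relative to the next
 * instruction. The packed union below lets the code build such an
 * instruction byte for byte.
 */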
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}
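/*
 * A disabled call site is replaced with a single 5-byte nop.
 * ideal_nops[NOP_ATOMIC5] is the 5-byte nop selected at boot for the
 * running CPU; it is a single instruction (not a sequence of shorter
 * nops), so a CPU can never be executing partway through it.
 */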
static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	return ftrace_modify_code(rec->ip, old, new);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}
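/*
 * ftrace_call is the patched call instruction inside the ftrace
 * trampoline; pointing it at a new function switches the callback
 * that every traced function ends up calling.
 */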
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
/*
 * The modifying_ftrace_code counter is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. Nor can a CPU read
 * modifying_ftrace_code before it executes the breakpoint; the
 * trap itself acts as an implicit barrier. Here's the flow that
 * is required:
 *
 *   CPU-0                          CPU-1
 *
 *   atomic_inc(mfc);
 *   write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *   atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that an ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location; it must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;
/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
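/*
 * On entry, regs->ip points to the byte just after the int3, so
 * regs->ip - 1 is the address of the instruction being modified.
 * Advancing by MCOUNT_INSN_SIZE - 1 skips the remaining four bytes,
 * which emulates executing a 5-byte nop.
 */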
int ftrace_int3_handler(struct pt_regs *regs)
{
	if (WARN_ON_ONCE(!regs))
		return 0;

	if (!ftrace_location(regs->ip - 1))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}
static int ftrace_write(unsigned long ip, const char *val, int size)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, val, size);
}
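/*
 * Install an int3 on the first byte of a call site, but only after
 * verifying that the site still holds exactly the bytes we expect.
 */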
static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	if (ftrace_write(ip, &brk, 1))
		return -EPERM;

	return 0;
}
static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}
static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}
/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return -1;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint; we may just create
		 * a disaster.
		 */
		ftrace_addr = (unsigned long)FTRACE_ADDR;
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

	return probe_kernel_write((void *)ip, &nop[0], 1);
}
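/*
 * Second step of an update: with the int3 still guarding the first
 * byte, rewrite the remaining four bytes of the instruction behind it.
 */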
static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
		return -EPERM;
	return 0;
}
static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}
static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}
static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}
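/*
 * Final step of an update: replace the int3 with the first byte of
 * the new instruction, making the whole 5-byte sequence live at once.
 */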
static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	if (ftrace_write(ip, new, 1))
		return -EPERM;

	return 0;
}
static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	if (ftrace_write(ip, new, 1))
		return -EPERM;
	return 0;
}
static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}
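/*
 * sync_core() executes a serializing instruction, forcing a CPU to
 * throw away any prefetched (possibly stale) instruction bytes;
 * run_sync() broadcasts it to all CPUs via IPI.
 */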
static void do_sync_core(void *data)
{
	sync_core();
}
static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}
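/*
 * ftrace_replace_code() converts every call site in three passes, with
 * a machine-wide sync between passes:
 *
 *   1) add_breakpoints() - put an int3 on the first byte of each site,
 *      so no CPU can execute a half-modified instruction
 *   2) add_update()      - rewrite the four tail bytes behind the int3
 *   3) finish_update()   - replace the int3 with the new first byte
 *
 * On any failure, the breakpoints added so far are removed again.
 */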
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	report = "removing breakpoints";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	return;

 remove_breakpoints:
	ftrace_bug(ret, rec ? rec->ip : 0);
	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		remove_breakpoint(rec);
	}
}
void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}
int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
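/*
 * ftrace_graph_call is a jmp (opcode 0xe9) inside the ftrace
 * trampoline. ftrace_mod_jmp() retargets it by rewriting its 32-bit
 * displacement, after checking that the old displacement is the one
 * we expect.
 */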
static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;
	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
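	/*
	 * Label 1 below loads the original return address through
	 * [parent]; label 2 replaces it with return_to_handler. If
	 * either access faults, the fixup code at label 4 sets
	 * 'faulted' and execution resumes at label 3.
	 */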
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}
	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}
	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */