kernel/trace/fgraph.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static bool kill_ftrace_graph;
int ftrace_graph_active;

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}

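/*
 * Illustrative sketch (not part of this file): function_graph_enter() is
 * called from architecture-specific entry code. A typical
 * prepare_ftrace_return() implementation, with the arch-specific argument
 * order and names assumed here, looks roughly like:
 *
 *	void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long return_hooker = (unsigned long)&return_to_handler;
 *		unsigned long old;
 *
 *		if (unlikely(ftrace_graph_is_dead()))
 *			return;
 *		if (unlikely(atomic_read(&current->tracing_graph_pause)))
 *			return;
 *
 *		old = *parent;
 *		(only redirect the return address if the entry was recorded)
 *		if (!function_graph_enter(old, self_addr, frame_pointer, parent))
 *			*parent = return_hooker;
 *	}
 */
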
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure; we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

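/*
 * Illustrative sketch (not part of this file): the return_to_handler
 * trampoline that calls ftrace_return_to_handler() is written in
 * per-architecture assembly. In pseudo-code it does roughly:
 *
 *	return_to_handler:
 *		save the live return-value registers
 *		original_ret = ftrace_return_to_handler(frame_pointer);
 *		restore the live return-value registers
 *		jump to original_ret
 */
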
/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_struct on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}

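/*
 * Illustrative sketch (not part of this file): a caller can walk a task's
 * shadow stack from the most recent entry downwards by bumping @idx until
 * NULL is returned:
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(task, i++)))
 *		pr_info("traced return address: %pS\n", (void *)ret_stack->ret);
 */
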
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

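/*
 * Illustrative sketch (not part of this file): a stack unwinder keeps one
 * 'graph_idx' state variable for the whole unwind and filters every return
 * address it reads off the stack ('addr_location' names the stack slot the
 * address came from):
 *
 *	int graph_idx = 0;
 *	...
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_location);
 *
 * Addresses that were not rewritten to return_to_handler pass through
 * unchanged.
 */
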
static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID |
				  FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock(&tasklist_lock);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;

	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

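/*
 * Illustrative sketch (not part of this file, callback names are made up):
 * only one function graph tracer may be registered at a time, with one
 * entry handler and one return handler:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(non-zero means: trace this function)
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	if (register_ftrace_graph(&my_gops))
 *		pr_warn("another graph tracer is already active\n");
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */
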
void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 out:
	mutex_unlock(&ftrace_lock);
}