#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>
#include <trace/power.h>
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,
	TRACE_BLK,

	__TRACE_LAST_TYPE,
};
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ent		graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ret		ret;
};

extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};
/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};
/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
/*
 * trace_printk entry:
 */
struct bprint_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			depth;
	const char		*fmt;
	u32			buf[];
};

struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			depth;
	char			buf[];
};

#define TRACE_OLD_SIZE		88
struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};
struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};
struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};

struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
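/*
 * These bits end up in trace_entry.flags. They are normally filled in
 * by tracing_generic_entry_update() (declared below) from the current
 * irq/softirq/preempt context, roughly like this sketch:
 *
 *	entry->flags = (irqs_disabled_flags(flags) ?
 *				TRACE_FLAG_IRQS_OFF : 0) |
 *		       (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 */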
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
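/*
 * Sketch of typical per-CPU access from a tracer callback (illustrative
 * only, not a definitive pattern): the data array is indexed by CPU,
 * and "disabled" is bumped around a write to keep the tracer from
 * recursing on itself:
 *
 *	struct trace_array_cpu *data;
 *	long disabled;
 *	int cpu;
 *
 *	cpu = raw_smp_processor_id();
 *	data = tr->data[cpu];
 *	disabled = atomic_inc_return(&data->disabled);
 *	if (likely(disabled == 1))
 *		... record the entry ...
 *	atomic_dec(&data->disabled);
 */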
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		__ftrace_bad_type();					\
	} while (0)
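/*
 * Illustrative use (sketch) from a print_line callback:
 *
 *	struct ftrace_entry *field;
 *	struct trace_entry *entry = iter->ent;
 *
 *	trace_assign_type(field, entry);
 *
 * This assigns "entry" to "field" after checking at compile time that
 * struct ftrace_entry is a known trace type, and warning at run time
 * if entry->type is not TRACE_FN.
 */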
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
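/*
 * A minimal sketch (hypothetical names) of defining tracer options:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-verbose, 0x1) },
 *		{ }	(the list must be NULL terminated)
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,	(initial value of all option bits)
 *		.opts = my_opts,
 *	};
 */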
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
	struct tracer_stat	*stats;
};
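/*
 * A minimal sketch (hypothetical names) of wiring up a tracer:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_init,
 *		.reset	= example_reset,
 *		.flags	= &my_flags,
 *	};
 *
 * and from an __init function:
 *
 *	return register_tracer(&example_tracer);
 *
 * register_tracer() is declared later in this header; the callbacks
 * follow the signatures in struct tracer above.
 */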
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

static inline void
trace_seq_init(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}
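/*
 * Sketch of the intended use: "len" is how much of "buffer" the
 * trace_seq_*() helpers have filled, "readpos" is how much a reader
 * has already consumed, and trace_seq_init() resets both, e.g.
 *
 *	struct trace_seq *s = &iter->seq;
 *
 *	trace_seq_init(s);
 */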
#define TRACE_PIPE_ALL_CPU	-1
/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    unsigned char type,
						    unsigned long len,
						    unsigned long flags,
						    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);
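/*
 * Typical reserve/commit pattern (a sketch, using the function entry
 * type as an example):
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip	 = ip;
 *	entry->parent_ip = parent_ip;
 *	trace_buffer_unlock_commit(tr, event, flags, pc);
 */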
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc);

extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
extern char *trace_find_cmdline(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);

extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t
print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x40000,
};
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of
 * need_resched. If resched is set, then we are either inside an atomic
 * or are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
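/*
 * The two are used as a pair around the tracing fast path (sketch):
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	... record the trace entry ...
 *	ftrace_preempt_enable(resched);
 */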
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};
struct ftrace_event_call {
	char		*name;
	char		*system;
	struct dentry	*dir;
	int		enabled;
	int		(*regfunc)(void);
	void		(*unregfunc)(void);
	int		id;
	int		(*raw_init)(void);
	int		(*show_format)(struct trace_seq *s);
};

void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to assign the static variable to fmt when it is not a
 * constant, even when the outer if statement optimizes out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
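/*
 * Illustrative call (sketch): callers pass the current instruction
 * pointer plus a printk-style format, e.g.
 *
 *	event_trace_printk(_THIS_IP_, "read %d bytes\n", ret);
 *
 * A constant fmt is placed in the __trace_printk_fmt section so the
 * cheaper __trace_bprintk() path can be used; otherwise the macro
 * falls back to __trace_printk().
 */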
#endif /* _LINUX_KERNEL_TRACE_H */