#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>
struct trace_print_flags {
        unsigned long           mask;
        const char              *name;
};

struct trace_print_flags_u64 {
        unsigned long long      mask;
        const char              *name;
};
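/*
 * Illustrative only (hypothetical table): arrays of these pairs back
 * __print_flags() and __print_symbolic() in a TP_printk(), with a
 * { -1, NULL } entry terminating the list:
 *
 *      static const struct trace_print_flags my_flag_names[] = {
 *              { 0x1, "READ"  },
 *              { 0x2, "WRITE" },
 *              { -1,  NULL    }
 *      };
 */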
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                                   unsigned long flags,
                                   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                     const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
                                         unsigned long long val,
                                         const struct trace_print_flags_u64
                                                                 *symbol_array);
#endif
const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                     unsigned int bitmask_size);

const char *ftrace_print_hex_seq(struct trace_seq *p,
                                 const unsigned char *buf, int len);
struct trace_iterator;
struct trace_event;

int ftrace_raw_output_prep(struct trace_iterator *iter,
                           struct trace_event *event);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define FTRACE_MAX_EVENT                                                \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
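/*
 * Since "type" above is an unsigned short, FTRACE_MAX_EVENT works out to
 * (1 << 16) - 1 = 65535, the largest event type id that can be recorded.
 */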
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct trace_buffer     *trace_buffer;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* it's true when current open file is snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_entry      *ent;
        unsigned long           lost_events;

        /* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                              int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};
struct trace_event {
        struct hlist_node       node;
        struct list_head        list;
        int                     type;
        struct trace_event_functions *funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);
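/*
 * Sketch (illustrative, names are hypothetical): a subsystem that emits its
 * own binary records can register a printer for them roughly like this:
 *
 *      static struct trace_event_functions my_funcs = {
 *              .trace  = my_trace_output,
 *      };
 *      static struct trace_event my_event = {
 *              .funcs  = &my_funcs,
 *      };
 *
 *      if (!register_ftrace_event(&my_event))
 *              pr_warn("failed to register trace event\n");
 *
 * On success the event is assigned a type id that identifies its records
 * in the ring buffer.
 */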
/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
        return trace_seq_has_overflowed(s) ?
                TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
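/*
 * Sketch (illustrative): an output callback can then simply end with
 *
 *      trace_seq_printf(s, "%s: %lu\n", name, val);
 *      return trace_handle_return(s);
 *
 * so that an overflowed trace_seq is reported as TRACE_TYPE_PARTIAL_LINE.
 */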
void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);

struct ftrace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                struct ftrace_event_file *ftrace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_CLOSE,
#endif
};

struct ftrace_event_call;
struct ftrace_event_class {
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct ftrace_event_call *event,
                                       enum trace_reg type, void *data);
        int                     (*define_fields)(struct ftrace_event_call *);
        struct list_head        *(*get_fields)(struct ftrace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
                            enum trace_reg type, void *data);
int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
                        char *fmt, ...);

int ftrace_event_define_field(struct ftrace_event_call *call,
                              char *type, int len, char *item, int offset,
                              int field_size, int sign, int filter);
struct ftrace_event_buffer {
        struct ring_buffer              *buffer;
        struct ring_buffer_event        *event;
        struct ftrace_event_file        *ftrace_file;
};

void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
                                  struct ftrace_event_file *ftrace_file,
                                  unsigned long len);

void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
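/*
 * Sketch (illustrative, the entry type and fields are hypothetical): event
 * probes pair the two helpers above roughly as:
 *
 *      struct ftrace_event_buffer fbuffer;
 *      struct my_entry *entry;
 *
 *      entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
 *                                          sizeof(*entry));
 *      if (!entry)
 *              return;
 *      entry->field = value;
 *      ftrace_event_buffer_commit(&fbuffer);
 */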
enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_WAS_ENABLED_BIT,
        TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
};
/*
 * Event flags:
 *  FILTERED        - The event has a filter attached
 *  CAP_ANY         - Any user can enable for perf
 *  NO_SET_FILTER   - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE   - For ftrace internal events, do not enable with debugfs file
 *  WAS_ENABLED     - Set and stays set when an event was ever enabled
 *                    (used for module unloading; if a module event is enabled,
 *                    it is best to clear the buffers that used it).
 *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
 *  TRACEPOINT      - Event is a tracepoint
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_WAS_ENABLED      = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
        TRACE_EVENT_FL_USE_CALL_FILTER  = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
};
struct ftrace_event_call {
        struct list_head        list;
        struct ftrace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        const char              *print_fmt;
        struct event_filter     *filter;

        /*
         *   bit 0:     filter_active
         *   bit 1:     allow trace by non root (cap any)
         *   bit 2:     failed to apply filter
         *   bit 3:     ftrace internal event (do not enable)
         *   bit 4:     Event was enabled by module
         *   bit 5:     use call filter rather than file filter
         *   bit 6:     Event is a tracepoint
         */
        int                     flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
        struct hlist_head __percpu      *perf_events;

        int     (*perf_perm)(struct ftrace_event_call *,
                             struct perf_event *);
#endif
};
static inline const char *
ftrace_event_name(struct ftrace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}
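/*
 * Illustrative only:
 *
 *      pr_debug("event %s\n", ftrace_event_name(call));
 */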
struct ftrace_subsystem_dir;

enum {
        FTRACE_EVENT_FL_ENABLED_BIT,
        FTRACE_EVENT_FL_RECORDED_CMD_BIT,
        FTRACE_EVENT_FL_FILTERED_BIT,
        FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
        FTRACE_EVENT_FL_SOFT_MODE_BIT,
        FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
        FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
        FTRACE_EVENT_FL_TRIGGER_COND_BIT,
};
/*
 * Ftrace event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                  tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 */
enum {
        FTRACE_EVENT_FL_ENABLED         = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
        FTRACE_EVENT_FL_RECORDED_CMD    = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
        FTRACE_EVENT_FL_FILTERED        = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
        FTRACE_EVENT_FL_NO_SET_FILTER   = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
        FTRACE_EVENT_FL_SOFT_MODE       = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
        FTRACE_EVENT_FL_SOFT_DISABLED   = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
        FTRACE_EVENT_FL_TRIGGER_MODE    = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
        FTRACE_EVENT_FL_TRIGGER_COND    = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
};
struct ftrace_event_file {
        struct list_head                list;
        struct ftrace_event_call        *event_call;
        struct event_filter             *filter;
        struct trace_array              *tr;
        struct ftrace_subsystem_dir     *system;
        struct list_head                triggers;

        /*
         * 32 bit flags:
         *   bit 0:     enabled
         *   bit 1:     enabled cmd record
         *   bit 2:     enable/disable with the soft disable bit
         *   bit 3:     soft disabled
         *   bit 4:     trigger enabled
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may have a slight
         * delay in propagating the changes to other CPUs due to
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};
#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct ftrace_event_call *tp_event, \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)            \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);
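/*
 * Illustrative use (the event name is hypothetical): restrict perf access
 * to an event unless the caller has CAP_SYS_ADMIN:
 *
 *      __TRACE_EVENT_PERF_PERM(my_event,
 *              capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
 */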
#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);

extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
                                struct ring_buffer *buffer,
                                struct ring_buffer_event *event);
extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
                                     struct ring_buffer *buffer,
                                     struct ring_buffer_event *event);

extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
                                                   void *rec);
extern void event_triggers_post_call(struct ftrace_event_file *file,
                                     enum event_trigger_type tt);
/**
 * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
                if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL);
                if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        return true;
        }
        return false;
}
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct ftrace_event_file *file,
                             struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
                             void *entry,
                             enum event_trigger_type *tt)
{
        unsigned long eflags = file->flags;

        if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
                *tt = event_triggers_call(file, entry);

        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
                ring_buffer_discard_commit(buffer, event);
        else if (!filter_check_discard(file, entry, buffer, event))
                return false;

        return true;
}
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct ftrace_event_file *file,
                            struct ring_buffer *buffer,
                            struct ring_buffer_event *event,
                            void *entry, unsigned long irq_flags, int pc)
{
        enum event_trigger_type tt = ETT_NONE;

        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
                trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

        if (tt)
                event_triggers_post_call(file, tt);
}
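/*
 * Sketch (illustrative, not taken from this header) of how a probe pairs the
 * reserve helper with the trigger-aware commit above:
 *
 *      event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *                                              event_type, sizeof(*entry),
 *                                              irq_flags, pc);
 *      if (!event)
 *              return;
 *      entry = ring_buffer_event_data(event);
 *      entry->field = value;
 *      event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
 *                                  irq_flags, pc);
 */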
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
                                 struct ring_buffer *buffer,
                                 struct ring_buffer_event *event,
                                 void *entry, unsigned long irq_flags, int pc,
                                 struct pt_regs *regs)
{
        enum event_trigger_type tt = ETT_NONE;

        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
                trace_buffer_unlock_commit_regs(buffer, event,
                                                irq_flags, pc, regs);

        if (tt)
                event_triggers_post_call(file, tt);
}
enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
};
extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern int trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type)    (((type)(-1)) < (type)1)
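/*
 * For example, is_signed_type(int) evaluates to 1, while
 * is_signed_type(unsigned long) evaluates to 0: casting -1 to an unsigned
 * type wraps to the largest value of that type, which is not less than 1.
 */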
int trace_set_clr_event(const char *system, const char *event, int set);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __attribute__((section("__trace_printk_fmt"))) =      \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)
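/*
 * Illustrative only: callers pass an instruction pointer and a printf-style
 * format, e.g.
 *
 *      event_trace_printk(_THIS_IP_, "count=%d\n", count);
 */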
#ifdef CONFIG_PERF_EVENTS

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
                                    struct pt_regs *regs, int *rctxp);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
                      u64 count, struct pt_regs *regs, void *head,
                      struct task_struct *task)
{
        perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
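/*
 * Sketch (illustrative): perf probes pair the two helpers above roughly as
 *
 *      entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *      if (!entry)
 *              return;
 *      ... fill in entry ...
 *      perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head, task);
 */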
#endif /* CONFIG_PERF_EVENTS */

#endif /* _LINUX_FTRACE_EVENT_H */