/* include/linux/ftrace_event.h */
#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct tracer;
struct dentry;

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);
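/*
 * Illustrative sketch, not part of the original header: these helpers back
 * the __print_flags()/__print_symbolic() macros used in TRACE_EVENT() print
 * formats.  A hypothetical flag table and call could look like:
 *
 *	static const struct trace_print_flags my_gfp_names[] = {
 *		{ GFP_KERNEL,	"GFP_KERNEL" },
 *		{ GFP_ATOMIC,	"GFP_ATOMIC" },
 *		{ -1,		NULL }
 *	};
 *
 *	ftrace_print_flags_seq(&iter->tmp_seq, "|", gfp_mask, my_gfp_names);
 *
 * "my_gfp_names" and "gfp_mask" are invented for the example; the table is
 * terminated by a NULL name.
 */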
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};
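/*
 * Illustrative sketch, not part of the original header: every record stored
 * in the ring buffer begins with a struct trace_entry; concrete event
 * layouts embed it as their first member.  The layout below is invented for
 * the example (real layouts are generated by the TRACE_EVENT() machinery):
 *
 *	struct my_event_entry {
 *		struct trace_entry	ent;	// common fields, filled by
 *						// tracing_generic_entry_update()
 *		int			prio;
 *		char			comm[16];
 *	};
 */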
#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

struct trace_event;

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
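/*
 * Illustrative sketch, not part of the original header: a minimal output
 * callback and its registration.  The "my_" names and the pid-only output
 * are invented for the example.
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags,
 *						struct trace_event *event)
 *	{
 *		struct trace_entry *entry = iter->ent;
 *
 *		if (!trace_seq_printf(&iter->seq, "my_event: pid=%d\n",
 *				      entry->pid))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 *	static struct trace_event_functions my_event_funcs = {
 *		.trace	= my_event_trace,
 *	};
 *
 *	static struct trace_event my_event = {
 *		.type	= 0,	// 0 lets register_ftrace_event() assign a type
 *		.funcs	= &my_event_funcs,
 *	};
 *
 *	register_ftrace_event(&my_event);
 */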
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);
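/*
 * Illustrative sketch, not part of the original header: the usual
 * reserve/fill/commit sequence a tracepoint probe goes through, roughly
 * what the TRACE_EVENT() generated code does.  "event_type" and
 * "struct my_event_entry" are invented for the example.
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_event_entry *entry;
 *	unsigned long irq_flags;
 *	int pc = preempt_count();
 *
 *	local_save_flags(irq_flags);
 *	event = trace_current_buffer_lock_reserve(&buffer, event_type,
 *						  sizeof(*entry),
 *						  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->prio = prio;		// fill event-specific fields
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */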
struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
};

struct ftrace_event_call;

struct ftrace_event_class {
	char			*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type);
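/*
 * Illustrative sketch, not part of the original header: ftrace_event_reg()
 * is the stock ->reg implementation used by generated event classes.  A
 * hand-rolled class (names invented here) might be set up as:
 *
 *	static struct ftrace_event_class my_event_class = {
 *		.system		= "my_subsys",
 *		.reg		= ftrace_event_reg,
 *		.define_fields	= my_event_define_fields,
 *	};
 */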
enum {
	TRACE_EVENT_FL_ENABLED_BIT,
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_RECORDED_CMD_BIT,
};

enum {
	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
};
struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct dentry		*dir;
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;

	/*
	 * 32 bit flags:
	 *   bit 1:		enabled
	 *   bit 2:		filter_active
	 *   bit 3:		enabled cmd record
	 *
	 * Changes to flags must be made while holding the event_mutex.
	 *
	 * Note: Reads of flags do not hold the event_mutex since
	 * they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such.
	 */
	unsigned int		flags;

#ifdef CONFIG_PERF_EVENTS
	int			perf_refcount;
	struct hlist_head __percpu *perf_events;
#endif
};
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_PRED		32
#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
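/*
 * Illustrative sketch, not part of the original header: probes usually run
 * the record through the event's filter before committing, discarding it on
 * a mismatch.  "call", "entry" and "event" are assumed to come from the
 * reserve/fill sequence sketched above:
 *
 *	if (!filter_current_check_discard(buffer, call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer, event,
 *						   irq_flags, pc);
 */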
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
};

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < 0)
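/*
 * Illustrative sketch, not part of the original header: a ->define_fields
 * callback describes each event-specific field with trace_define_field().
 * "my_event_define_fields" and "struct my_event_entry" are invented for
 * the example.
 *
 *	static int my_event_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "int", "prio",
 *					  offsetof(struct my_event_entry, prio),
 *					  sizeof(int), is_signed_type(int),
 *					  FILTER_OTHER);
 *	}
 */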
int trace_set_clr_event(const char *system, const char *event, int set);
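/*
 * Illustrative sketch, not part of the original header: kernel code can
 * enable or disable an event by name, e.g. turning on sched_switch:
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 */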
/*
 * The double __builtin_constant_p is needed because gcc will give us an
 * error if we try to initialize the static variable from fmt when fmt is
 * not a constant, even though the outer if statement would optimize the
 * branch away.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
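/*
 * Illustrative sketch, not part of the original header: a typical call
 * records the caller's instruction pointer with a printf-style message;
 * "count" is invented for the example.
 *
 *	event_trace_printk(_THIS_IP_, "count=%d\n", count);
 */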
#ifdef CONFIG_PERF_EVENTS

struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
}
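/*
 * Illustrative sketch, not part of the original header: a perf probe pairs
 * perf_trace_buf_prepare() with perf_trace_buf_submit().  "event_type",
 * "struct my_event_entry" and "head" are placeholders for this example.
 *
 *	struct pt_regs regs;
 *	struct my_event_entry *entry;
 *	int rctx;
 *
 *	perf_fetch_caller_regs(&regs);
 *	entry = perf_trace_buf_prepare(sizeof(*entry), event_type,
 *				       &regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->prio = prio;		// fill event-specific fields
 *	perf_trace_buf_submit(entry, sizeof(*entry), rctx, 0, 1, &regs, head);
 */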
#endif /* CONFIG_PERF_EVENTS */

#endif /* _LINUX_FTRACE_EVENT_H */