tracing: avoid warnings from zero-arg tracepoints
include/trace/ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry ent;
 *	<type> <item>;
 *	<type2> <item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt)

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __field
#define __field(type, item)		type	item;

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	struct ftrace_raw_##name { \
		struct trace_entry ent; \
		tstruct \
	}; \
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
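/*
 * Illustration (not part of the generated code): given a hypothetical
 * event defined elsewhere as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int value),
 *		TP_ARGS(value),
 *		TP_STRUCT__entry(
 *			__field(int, value)
 *		),
 *		TP_fast_assign(
 *			__entry->value = value;
 *		),
 *		TP_printk("value=%d", __entry->value)
 *	);
 *
 * stage 1 above would generate roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry ent;
 *		int value;
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 *
 * The name foo_bar and its field are made up for this example; the same
 * event is reused in the illustrations of the later stages.
 */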
/*
 * Stage 2 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##call *field; \
	struct trace_entry *entry; \
	int ret; \
	\
	entry = iter->ent; \
	\
	if (entry->type != event_##call.id) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
	\
	field = (typeof(field))entry; \
	\
	ret = trace_seq_printf(s, #call ": " print); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	\
	return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
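/*
 * Illustration for the hypothetical foo_bar event from stage 1: after the
 * __entry and TP_printk overrides above, its
 * TP_printk("value=%d", __entry->value) turns the trace_seq_printf() line
 * in ftrace_raw_output_foo_bar() into roughly
 *
 *	ret = trace_seq_printf(s, "foo_bar" ": " "value=%d" "\n",
 *			       field->value);
 *
 * i.e. the event name, the user-supplied format and a newline are pasted
 * into one format string, and every __entry-> reference reads from the
 * ftrace_raw_foo_bar record found in the ring buffer.
 */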
/*
 * Setup the showing format of trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
	if (!ret) \
		return 0;

#undef __array
#define __array(type, item, len) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
	if (!ret) \
		return 0;

#undef __entry
#define __entry REC

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_##call(struct trace_seq *s) \
{ \
	struct ftrace_raw_##call field __attribute__((unused)); \
	int ret = 0; \
	\
	tstruct; \
	\
	trace_seq_printf(s, "\nprint fmt: " print); \
	\
	return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
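/*
 * Illustration: for the hypothetical foo_bar event, ftrace_format_foo_bar()
 * would emit roughly the following into the per-event "format" file (the
 * exact offset depends on the layout of struct trace_entry in this tree,
 * and the common fields are printed by the generic event code, not by the
 * macros above):
 *
 *	field:int value;	offset:12;	size:4;
 *
 *	print fmt: "value=%d", REC->value
 */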
#undef __field
#define __field(type, item) \
	ret = trace_define_field(event_call, #type, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item)); \
	if (ret) \
		return ret;

#undef __array
#define __array(type, item, len) \
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item)); \
	if (ret) \
		return ret;

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
int \
ftrace_define_fields_##call(void) \
{ \
	struct ftrace_raw_##call field; \
	struct ftrace_event_call *event_call = &event_##call; \
	int ret; \
	\
	__common_field(unsigned char, type); \
	__common_field(unsigned char, flags); \
	__common_field(unsigned char, preempt_count); \
	__common_field(int, pid); \
	__common_field(int, tgid); \
	\
	tstruct; \
	\
	return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
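/*
 * Illustration: inside ftrace_define_fields_foo_bar() for the hypothetical
 * foo_bar event, the single __field(int, value) entry expands to roughly
 *
 *	ret = trace_define_field(event_call, "int", "value",
 *				 offsetof(struct ftrace_raw_foo_bar, value),
 *				 sizeof(field.value));
 *	if (ret)
 *		return ret;
 *
 * which registers the field with the event filter code so that filters
 * such as "value > 0" can later be applied to the event.
 */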
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_FORMAT:
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name = "<call>",
 *	.regfunc = ftrace_reg_event_<call>,
 *	.unregfunc = ftrace_unreg_event_<call>,
 * };
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>; <-- Here we assign the entries by the __field and
 *		      __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace = ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name = "<call>",
 *	.system = "<system>",
 *	.raw_init = ftrace_raw_init_event_<call>,
 *	.regfunc = ftrace_reg_event_<call>,
 *	.unregfunc = ftrace_unreg_event_<call>,
 *	.show_format = ftrace_format_<call>,
 * };
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args
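/*
 * Illustration: TP_FMT("irq=%d", irq) becomes the printf-style argument list
 *
 *	"irq=%d" "\n", irq
 *
 * The ## before args lets a format with no arguments, e.g. TP_FMT("done"),
 * expand without leaving a trailing comma behind.
 */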
#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE(call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
	extern void perf_tpcounter_event(int); \
	perf_tpcounter_event(event_##call.id); \
} \
	\
static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
{ \
	int ret = 0; \
	\
	if (!atomic_inc_return(&call->profile_count)) \
		ret = register_trace_##call(ftrace_profile_##call); \
	\
	return ret; \
} \
	\
static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
{ \
	if (atomic_add_negative(-1, &call->profile_count)) \
		unregister_trace_##call(ftrace_profile_##call); \
}

#define _TRACE_PROFILE_INIT(call) \
	.profile_count = ATOMIC_INIT(-1), \
	.profile_enable = ftrace_profile_enable_##call, \
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif
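/*
 * Illustration of the profile_count bookkeeping above: the counter starts at
 * -1 (see _TRACE_PROFILE_INIT), so the first ftrace_profile_enable_<call>()
 * sees atomic_inc_return() return 0 and registers the probe; later enables
 * only bump the count.  On disable, atomic_add_negative(-1, ...) is true only
 * when the count drops back to -1, i.e. when the last profiling user goes
 * away, and only then is the probe unregistered.
 */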
#define _TRACE_FORMAT(call, proto, args, fmt) \
static void ftrace_event_##call(proto) \
{ \
	event_trace_printk(_RET_IP_, #call ": " fmt); \
} \
	\
static int ftrace_reg_event_##call(void) \
{ \
	int ret; \
	\
	ret = register_trace_##call(ftrace_event_##call); \
	if (ret) \
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n"); \
	return ret; \
} \
	\
static void ftrace_unreg_event_##call(void) \
{ \
	unregister_trace_##call(ftrace_event_##call); \
} \
	\
static struct ftrace_event_call event_##call; \
	\
static int ftrace_init_event_##call(void) \
{ \
	int id; \
	\
	id = register_ftrace_event(NULL); \
	if (!id) \
		return -ENODEV; \
	event_##call.id = id; \
	return 0; \
}

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt) \
_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name = #call, \
	.system = __stringify(TRACE_SYSTEM), \
	.raw_init = ftrace_init_event_##call, \
	.regfunc = ftrace_reg_event_##call, \
	.unregfunc = ftrace_unreg_event_##call, \
	_TRACE_PROFILE_INIT(call) \
}
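/*
 * Illustration: a hypothetical TRACE_FORMAT user such as
 *
 *	TRACE_FORMAT(foo_done,
 *		TP_PROTO(int error),
 *		TP_ARGS(error),
 *		TP_FMT("error=%d", error));
 *
 * would get ftrace_event_foo_done() forwarding to
 * event_trace_printk(_RET_IP_, "foo_done" ": " "error=%d" "\n", error),
 * the register/unregister helpers, and an event_foo_done entry placed in
 * the _ftrace_events section so the event core can find it at boot.
 */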
#undef __entry
#define __entry entry

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
	\
static struct ftrace_event_call event_##call; \
	\
static void ftrace_raw_event_##call(proto) \
{ \
	struct ftrace_event_call *call = &event_##call; \
	struct ring_buffer_event *event; \
	struct ftrace_raw_##call *entry; \
	unsigned long irq_flags; \
	int pc; \
	\
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
	\
	event = trace_current_buffer_lock_reserve(event_##call.id, \
				  sizeof(struct ftrace_raw_##call), \
				  irq_flags, pc); \
	if (!event) \
		return; \
	entry = ring_buffer_event_data(event); \
	\
	assign; \
	\
	if (!filter_current_check_discard(call, entry, event)) \
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
} \
	\
static int ftrace_raw_reg_event_##call(void) \
{ \
	int ret; \
	\
	ret = register_trace_##call(ftrace_raw_event_##call); \
	if (ret) \
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n"); \
	return ret; \
} \
	\
static void ftrace_raw_unreg_event_##call(void) \
{ \
	unregister_trace_##call(ftrace_raw_event_##call); \
} \
	\
static struct trace_event ftrace_event_type_##call = { \
	.trace = ftrace_raw_output_##call, \
}; \
	\
static int ftrace_raw_init_event_##call(void) \
{ \
	int id; \
	\
	id = register_ftrace_event(&ftrace_event_type_##call); \
	if (!id) \
		return -ENODEV; \
	event_##call.id = id; \
	INIT_LIST_HEAD(&event_##call.fields); \
	init_preds(&event_##call); \
	return 0; \
} \
	\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name = #call, \
	.system = __stringify(TRACE_SYSTEM), \
	.event = &ftrace_event_type_##call, \
	.raw_init = ftrace_raw_init_event_##call, \
	.regfunc = ftrace_raw_reg_event_##call, \
	.unregfunc = ftrace_raw_unreg_event_##call, \
	.show_format = ftrace_format_##call, \
	.define_fields = ftrace_define_fields_##call, \
	_TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
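/*
 * Usage sketch (illustrative; "foo" and the foo_bar event are hypothetical):
 * an event header under include/trace/events/ defines its events with
 * TRACE_EVENT() and then pulls in this file indirectly through
 * <trace/define_trace.h>, which re-reads that header once per stage above:
 *
 *	#undef TRACE_SYSTEM
 *	#define TRACE_SYSTEM foo
 *
 *	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
 *	#define _TRACE_FOO_H
 *
 *	#include <linux/tracepoint.h>
 *
 *	TRACE_EVENT(foo_bar, ...);
 *
 *	#endif
 *
 *	#include <trace/define_trace.h>
 *
 * Each pass through TRACE_INCLUDE(TRACE_INCLUDE_FILE) above then expands
 * TRACE_EVENT(foo_bar, ...) with a different set of macro definitions,
 * producing in turn the record structure, the output, format and
 * define_fields helpers, and the registration code for the event.
 */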
#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT