include/trace/ftrace.h

/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */

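/*
 * Illustration (not part of the original header): a trace header that
 * contains, for example,
 *
 *	TRACE_EVENT(foo_bar, TP_PROTO(int cpu), TP_ARGS(cpu), ...)
 *
 * is therefore handled exactly like
 *
 *	DECLARE_EVENT_CLASS(foo_bar, TP_PROTO(int cpu), TP_ARGS(cpu), ...);
 *	DEFINE_EVENT(foo_bar, foo_bar, TP_PROTO(int cpu), TP_ARGS(cpu));
 *
 * i.e. a class and a single event of the same name sharing that class.
 */
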
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			\
			    PARAMS(proto),		\
			    PARAMS(args),		\
			    PARAMS(tstruct),		\
			    PARAMS(assign),		\
			    PARAMS(print));		\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,	\
		assign, print, reg, unreg)		\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * used to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

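/*
 * Illustration (not part of the original header): for the hypothetical
 * foo_bar event sketched in Stage 1, which has a single __string() field,
 * this stage produces
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * where the low 16 bits of 'name' hold the offset of the string data from
 * the beginning of the event and the high 16 bits hold its length.
 */
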
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {		\
		tstruct;				\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format that is shown for the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

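/*
 * Illustration (not part of the original header): the text generated here
 * is what user space reads from each event's "format" file under debugfs
 * (e.g. /sys/kernel/debug/tracing/events/<system>/<call>/format). For the
 * hypothetical foo_bar event it would contain lines along the lines of
 *
 *	field:int cpu;	offset:12;	size:4;	signed:1;
 *	field:__data_loc char[] name;	offset:16;	size:4;	signed:1;
 *
 *	print fmt: "cpu=%d name=%s", REC->cpu, __get_str(name)
 *
 * (the offsets and signedness shown are only indicative; they depend on
 * the layout of struct trace_entry and on the architecture).
 */
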
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field),	\
						      __data_loc_##item), \
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_format_setup_##call(struct ftrace_event_call *unused,		\
			   struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	return ret;							\
}									\
									\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##call(unused, s);			\
	if (!ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
static int								\
ftrace_format_##name(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##template(unused, s);		\
	if (!ret)							\
		return ret;						\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

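/*
 * Illustration (not part of the original header): for the hypothetical
 * foo_bar event this output callback renders a record in the "trace"
 * file as
 *
 *	foo_bar: cpu=2 name=eth0
 *
 * i.e. "<call>: " followed by the TP_printk() format evaluated against
 * the binary entry (the values shown are made up).
 */
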
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static enum print_line_t						\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static enum print_line_t						\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,	\
					       #name, iter, flags);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),  \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

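/*
 * Illustration (not part of the original header): for the hypothetical
 * foo_bar event, the ftrace_get_offsets_foo_bar() helper generated below
 * would, for name = "eth0", return
 *
 *	__data_size = strlen("eth0") + 1 = 5
 *
 * and record in __data_offsets.name both the offset of the string data
 * (offsetof(struct ftrace_raw_foo_bar, __data)) in the low 16 bits and
 * the length 5 * sizeof(char) in the high 16 bits.
 */
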
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

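/*
 * Illustration (not part of the original header): for the hypothetical
 * foo_bar event this generates ftrace_profile_enable_foo_bar() and
 * ftrace_profile_disable_foo_bar(); they are wired into the event via
 * _TRACE_PROFILE_INIT() in the event_<call> definition further down.
 */
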
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= trace_event_raw_init,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

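/*
 * Illustration (not part of the original header): the event_<call>
 * structures generated below are collected in the "_ftrace_events"
 * section and registered when event tracing is initialized, so the
 * hypothetical foo_bar event would show up as a directory
 *
 *	/sys/kernel/debug/tracing/events/<system>/foo_bar/
 *
 * containing "enable", "id", "format" and "filter" files, with
 * TRACE_SYSTEM supplying the <system> name.
 */
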
#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)	\

#undef __assign_str
#define __assign_str(dst, src)					\
	strcpy(__get_str(dst), src);

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	return register_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,			\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##template,		\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,			\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- affect our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to perf counter
 * }
 */

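/*
 * Illustration (not part of the original header): these profile callbacks
 * are what run when a tracepoint is used as a perf event, e.g.
 *
 *	perf record -e <system>:foo_bar -a sleep 1
 *
 * perf enables the event through .profile_enable (which registers
 * ftrace_profile_foo_bar as the tracepoint probe), and each hit is
 * handed to perf via perf_tp_event() instead of the ftrace ring buffer.
 */
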
#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static void								\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static void ftrace_profile_##call(proto)			\
{								\
	struct ftrace_event_call *event_call = &event_##call;	\
								\
	ftrace_profile_templ_##template(event_call, args);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT