/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

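/*
 * For example (a hypothetical event, shown only to illustrate the
 * expansion; "foo_bar" and its fields are not defined anywhere):
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(int, cpu)
 *			__string(name, name)
 *		),
 *		...);
 *
 * produces, at this stage:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 */
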
#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name, \
			    PARAMS(proto), \
			    PARAMS(args), \
			    PARAMS(tstruct), \
			    PARAMS(assign), \
			    PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

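/*
 * A minimal usage sketch (event and field names are hypothetical,
 * illustrative only): one class shared by two events that differ only
 * in the tracepoint they attach to:
 *
 *	DECLARE_EVENT_CLASS(sched_wakeup_template,
 *		TP_PROTO(struct task_struct *p, int success),
 *		TP_ARGS(p, success),
 *		TP_STRUCT__entry(...),
 *		TP_fast_assign(...),
 *		TP_printk(...));
 *
 *	DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
 *		TP_PROTO(struct task_struct *p, int success),
 *		TP_ARGS(p, success));
 *
 *	DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
 *		TP_PROTO(struct task_struct *p, int success),
 *		TP_ARGS(p, success));
 */
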
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
	struct ftrace_raw_##name { \
		struct trace_entry	ent; \
		tstruct \
		char			__data[0]; \
	}; \
	\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
	static struct ftrace_event_call __used \
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
		assign, print, reg, unreg) \
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value) \
	__TRACE_EVENT_FLAGS(name, value)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is used
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

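/*
 * Concretely (a sketch of the encoding described above, with made-up
 * numbers): for a dynamic array whose data starts 40 bytes from the
 * beginning of the event record and occupies 16 bytes, the packed u32 is
 *
 *	(16 << 16) | 40 == 0x00100028
 *
 * so (<item> & 0xffff) recovers the offset and (<item> >> 16) the size.
 */
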
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
	struct ftrace_data_offsets_##call { \
		tstruct; \
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

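/*
 * Usage sketch (hypothetical string field "name"; illustrative only):
 *
 *	TP_printk("comm=%s", __get_str(name))
 *
 * The low 16 bits of __entry->__data_loc_name give the offset of the
 * string within the event record, so __get_str() yields a pointer
 * into the __data area.
 */
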
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
	({ \
		static const struct trace_print_flags __flags[] = \
			{ flag_array, { -1, NULL }}; \
		ftrace_print_flags_seq(p, delim, flag, __flags); \
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
	({ \
		static const struct trace_print_flags symbols[] = \
			{ symbol_array, { -1, NULL }}; \
		ftrace_print_symbols_seq(p, value, symbols); \
	})

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

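/*
 * Typical TP_printk usage of the helpers above (the flag and state
 * values are hypothetical, illustrative only):
 *
 *	TP_printk("state=%s flags=%s",
 *		__print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }),
 *		__print_flags(__entry->flags, "|",
 *			{ 0x1, "URGENT" }, { 0x2, "PINNED" }))
 */
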
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
			 struct trace_event *trace_event) \
{ \
	struct ftrace_event_call *event; \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##call *field; \
	struct trace_entry *entry; \
	struct trace_seq *p = &iter->tmp_seq; \
	int ret; \
	\
	event = container_of(trace_event, struct ftrace_event_call, \
			     event); \
	\
	entry = iter->ent; \
	\
	if (entry->type != event->event.type) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
	\
	field = (typeof(field))entry; \
	\
	trace_seq_init(p); \
	ret = trace_seq_printf(s, "%s: ", event->name); \
	if (ret) \
		ret = trace_seq_printf(s, print); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	\
	return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
	.trace			= ftrace_raw_output_##call, \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
			 struct trace_event *event) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##template *field; \
	struct trace_entry *entry; \
	struct trace_seq *p = &iter->tmp_seq; \
	int ret; \
	\
	entry = iter->ent; \
	\
	if (entry->type != event_##call.event.type) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
	\
	field = (typeof(field))entry; \
	\
	trace_seq_init(p); \
	ret = trace_seq_printf(s, "%s: ", #call); \
	if (ret) \
		ret = trace_seq_printf(s, print); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	\
	return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
	.trace			= ftrace_raw_output_##call, \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type) \
	ret = trace_define_field(event_call, #type, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), \
				 is_signed_type(type), filter_type); \
	if (ret) \
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
	do { \
		mutex_lock(&event_storage_mutex); \
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
		snprintf(event_storage, sizeof(event_storage), \
			 "%s[%d]", #type, len); \
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), \
				 is_signed_type(type), FILTER_OTHER); \
		mutex_unlock(&event_storage_mutex); \
		if (ret) \
			return ret; \
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
	struct ftrace_raw_##call field; \
	int ret; \
	\
	tstruct; \
	\
	return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__data_offsets->item = __data_size + \
			       offsetof(typeof(*entry), __data); \
	__data_offsets->item |= (len * sizeof(type)) << 16; \
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int ftrace_get_offsets_##call( \
	struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
	int __data_size = 0; \
	struct ftrace_raw_##call __maybe_unused *entry; \
	\
	tstruct; \
	\
	return __data_size; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.name			= "<call>",
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 */

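/*
 * A sketch of how that section-placed pointer array is consumed (the
 * loop below is illustrative; the actual walk lives in
 * kernel/trace/trace_events.c, using the linker-provided bounds):
 *
 *	extern struct ftrace_event_call *__start_ftrace_events[];
 *	extern struct ftrace_event_call *__stop_ftrace_events[];
 *
 *	struct ftrace_event_call **iter;
 *
 *	for (iter = __start_ftrace_events; iter < __stop_ftrace_events; iter++)
 *		// register/initialize the event that *iter points to
 */
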
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto) \
	static notrace void \
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call) \
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
	strcpy(__get_str(dst), src);

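/*
 * Sketch of how __string()/__assign_str() pair up inside an event
 * definition (the "name" field and task->comm source are hypothetical,
 * illustrative only):
 *
 *	TP_STRUCT__entry(
 *		__string(name, task->comm)	<-- reserves the space
 *	),
 *	TP_fast_assign(
 *		__assign_str(name, task->comm);	<-- copies into __data
 *	)
 */
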
#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
	\
static notrace void \
ftrace_raw_event_##call(void *__data, proto) \
{ \
	struct ftrace_event_call *event_call = __data; \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
	struct ring_buffer_event *event; \
	struct ftrace_raw_##call *entry; \
	struct ring_buffer *buffer; \
	unsigned long irq_flags; \
	int __data_size; \
	int pc; \
	\
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
	\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	\
	event = trace_current_buffer_lock_reserve(&buffer, \
				 event_call->event.type, \
				 sizeof(*entry) + __data_size, \
				 irq_flags, pc); \
	if (!event) \
		return; \
	entry = ring_buffer_event_data(event); \
	\
	tstruct \
	\
	{ assign; } \
	\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer, \
						  event, irq_flags, pc); \
}

/*
 * The ftrace_test_probe is compiled out; it is only here as a build time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void ftrace_test_probe_##call(void) \
{ \
	check_trace_callback_type_##call(ftrace_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)

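/*
 * For example (hypothetical, for illustration): with
 *
 *	TP_printk("cpu=%d", __entry->cpu)
 *
 * the resulting print_fmt_<call>[] holds the literal string
 *
 *	"cpu=%d", REC->cpu
 *
 * (quotes included around the format part), which is what userspace
 * sees in the event's "format" file.
 */
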
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \
static struct ftrace_event_class __used event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM), \
	.define_fields		= ftrace_define_fields_##call, \
	.fields			= LIST_HEAD_INIT(event_class_##call.fields), \
	.raw_init		= trace_event_raw_init, \
	.probe			= ftrace_raw_event_##call, \
	.reg			= ftrace_event_reg, \
	_TRACE_PERF_INIT(call) \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
	\
static struct ftrace_event_call __used event_##call = { \
	.name			= #call, \
	.class			= &event_class_##template, \
	.event.funcs		= &ftrace_event_type_funcs_##template, \
	.print_fmt		= print_fmt_##template, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
	\
static const char print_fmt_##call[] = print; \
	\
static struct ftrace_event_call __used event_##call = { \
	.name			= #call, \
	.class			= &event_class_##template, \
	.event.funcs		= &ftrace_event_type_funcs_##call, \
	.print_fmt		= print_fmt_##call, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to perf events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to perf counter
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
	struct ftrace_event_call *event_call = __data; \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
	struct ftrace_raw_##call *entry; \
	struct pt_regs __regs; \
	u64 __addr = 0, __count = 1; \
	struct hlist_head *head; \
	int __entry_size; \
	int __data_size; \
	int rctx; \
	\
	perf_fetch_caller_regs(&__regs); \
	\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
			     sizeof(u64)); \
	__entry_size -= sizeof(u32); \
	\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
		      "profile buffer not large enough")) \
		return; \
	\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
		__entry_size, event_call->event.type, &__regs, &rctx); \
	if (!entry) \
		return; \
	\
	tstruct \
	\
	{ assign; } \
	\
	head = this_cpu_ptr(event_call->perf_events); \
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
		__count, &__regs, head); \
}

/*
 * This part is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void perf_test_probe_##call(void) \
{ \
	check_trace_callback_type_##call(perf_trace_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT