// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
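
/*
 * For reference, triggers are attached to an event from user space by
 * writing a command string to that event's 'trigger' file in tracefs,
 * e.g. (illustrative path and values, not taken from this file):
 *
 *   echo 'traceoff:5 if pid == 1234' > \
 *       /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Each such write ends up in event_trigger_regex_write() below, and the
 * per-event invocation path at trace time is event_triggers_call() and,
 * for deferred (post) triggers, event_triggers_post_call() above.
 */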

#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * registration.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
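
/*
 * Illustrative walk-through of event_trigger_callback() (assumed input,
 * not from this file): for glob "traceon:10 if prio < 50", @cmd is
 * "traceon" and @param starts as "10 if prio < 50".  The leading digit
 * makes "10" the trigger count (parsed by kstrtoul() into
 * trigger_data->count) and the remaining "if prio < 50" is handed to
 * cmd_ops->set_filter().  Writing "!traceon" instead takes the
 * glob[0] == '!' path and unregisters the trigger.
 */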

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
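
/*
 * A note on the expected @filter_str format (illustrative): the string
 * passed in is the remainder of the trigger command starting at "if",
 * e.g. "if common_pid != 0".  The leading "if" token is stripped here
 * and the rest is compiled by create_event_filter() against the
 * triggering event's fields.
 */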

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}
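
/*
 * Named triggers are currently used by hist triggers (assumed example,
 * defined outside this file): e.g. 'hist:keys=call_site:name=foo' on one
 * event and 'hist:name=foo' on another make both events feed the single
 * histogram owned by the first registration, which is the common data
 * that find_named_trigger() returns.
 */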

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
			break;
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common trigger data owned by the first registration
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}

static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
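
/*
 * Illustrative snapshot trigger usage (the path is an example): writing
 * 'snapshot:1' to an event's trigger file arms a one-shot snapshot of
 * that event's trace instance.  register_snapshot_trigger() above
 * allocates the instance's snapshot buffer at registration time so the
 * trigger never has to allocate in the trace-time path.
 */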

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
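
/*
 * Illustrative enable_event usage (assumed values): writing
 * 'enable_event:kmem:kmalloc:5 if pid == 1' to another event's trigger
 * file is parsed above as system "kmem", event "kmalloc" and count "5".
 * The target event is put into soft mode here, and then flipped out of
 * SOFT_DISABLED by event_enable_trigger() each time the triggering
 * event fires and its filter matches, at most five times.
 */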

int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}