2 * trace_events_filter - generic event filtering
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
28 #include "trace_output.h"
30 #define DEFAULT_SYS_FILTER_MESSAGE \
31 "### global filter ###\n" \
32 "# Use this to set filters for multiple events.\n" \
33 "# Only events with the given fields will be affected.\n" \
34 "# If no events are modified, an error message will be displayed here"
58 /* Order must be the same as enum filter_op_ids above */
59 static struct filter_op filter_ops
[] = {
70 { OP_NONE
, "OP_NONE", 0 },
71 { OP_OPEN_PAREN
, "(", 0 },
77 FILT_ERR_UNBALANCED_PAREN
,
78 FILT_ERR_TOO_MANY_OPERANDS
,
79 FILT_ERR_OPERAND_TOO_LONG
,
80 FILT_ERR_FIELD_NOT_FOUND
,
81 FILT_ERR_ILLEGAL_FIELD_OP
,
82 FILT_ERR_ILLEGAL_INTVAL
,
83 FILT_ERR_BAD_SUBSYS_FILTER
,
84 FILT_ERR_TOO_MANY_PREDS
,
85 FILT_ERR_MISSING_FIELD
,
86 FILT_ERR_INVALID_FILTER
,
87 FILT_ERR_IP_FIELD_ONLY
,
90 static char *err_text
[] = {
97 "Illegal operation for field type",
98 "Illegal integer value",
99 "Couldn't find or set field in one of a subsystem's events",
100 "Too many terms in predicate expression",
101 "Missing field name and/or value",
102 "Meaningless filter expression",
103 "Only 'ip' field is supported for function trace",
108 struct list_head list
;
114 struct list_head list
;
117 struct filter_parse_state
{
118 struct filter_op
*ops
;
119 struct list_head opstack
;
120 struct list_head postfix
;
131 char string
[MAX_FILTER_STR_VAL
];
138 struct filter_pred
**preds
;
/*
 * Generates filter_pred_<type>(): a relational-compare predicate for a
 * numeric event field of the given C type.  Reads the field at
 * pred->offset from the raw event record and compares it against
 * pred->val using the operator in pred->op.  Returns nonzero on match.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}
/*
 * Generates filter_pred_<size>(): an equality predicate for a numeric
 * field of the given bit width.  pred->not inverts the result, which
 * implements both OP_EQ and OP_NE with one function.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
184 DEFINE_COMPARISON_PRED(s64
);
185 DEFINE_COMPARISON_PRED(u64
);
186 DEFINE_COMPARISON_PRED(s32
);
187 DEFINE_COMPARISON_PRED(u32
);
188 DEFINE_COMPARISON_PRED(s16
);
189 DEFINE_COMPARISON_PRED(u16
);
190 DEFINE_COMPARISON_PRED(s8
);
191 DEFINE_COMPARISON_PRED(u8
);
193 DEFINE_EQUALITY_PRED(64);
194 DEFINE_EQUALITY_PRED(32);
195 DEFINE_EQUALITY_PRED(16);
196 DEFINE_EQUALITY_PRED(8);
198 /* Filter predicate for fixed sized arrays of characters */
199 static int filter_pred_string(struct filter_pred
*pred
, void *event
)
201 char *addr
= (char *)(event
+ pred
->offset
);
204 cmp
= pred
->regex
.match(addr
, &pred
->regex
, pred
->regex
.field_len
);
206 match
= cmp
^ pred
->not;
211 /* Filter predicate for char * pointers */
212 static int filter_pred_pchar(struct filter_pred
*pred
, void *event
)
214 char **addr
= (char **)(event
+ pred
->offset
);
216 int len
= strlen(*addr
) + 1; /* including tailing '\0' */
218 cmp
= pred
->regex
.match(*addr
, &pred
->regex
, len
);
220 match
= cmp
^ pred
->not;
226 * Filter predicate for dynamic sized arrays of characters.
227 * These are implemented through a list of strings at the end
229 * Also each of these strings have a field in the entry which
230 * contains its offset from the beginning of the entry.
231 * We have then first to get this field, dereference it
232 * and add it to the address of the entry, and at last we have
233 * the address of the string.
235 static int filter_pred_strloc(struct filter_pred
*pred
, void *event
)
237 u32 str_item
= *(u32
*)(event
+ pred
->offset
);
238 int str_loc
= str_item
& 0xffff;
239 int str_len
= str_item
>> 16;
240 char *addr
= (char *)(event
+ str_loc
);
243 cmp
= pred
->regex
.match(addr
, &pred
->regex
, str_len
);
245 match
= cmp
^ pred
->not;
/* Default predicate: matches nothing; placeholder until a real fn is set. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
256 * regex_match_foo - Basic regex callbacks
258 * @str: the string to be searched
259 * @r: the regex structure containing the pattern string
260 * @len: the length of the string to be searched (including '\0')
263 * - @str might not be NULL-terminated if it's of type DYN_STRING
267 static int regex_match_full(char *str
, struct regex
*r
, int len
)
269 if (strncmp(str
, r
->pattern
, len
) == 0)
274 static int regex_match_front(char *str
, struct regex
*r
, int len
)
276 if (strncmp(str
, r
->pattern
, r
->len
) == 0)
281 static int regex_match_middle(char *str
, struct regex
*r
, int len
)
283 if (strnstr(str
, r
->pattern
, len
))
288 static int regex_match_end(char *str
, struct regex
*r
, int len
)
290 int strlen
= len
- 1;
292 if (strlen
>= r
->len
&&
293 memcmp(str
+ strlen
- r
->len
, r
->pattern
, r
->len
) == 0)
299 * filter_parse_regex - parse a basic regex
300 * @buff: the raw regex
301 * @len: length of the regex
302 * @search: will point to the beginning of the string to compare
303 * @not: tell whether the match will have to be inverted
305 * This passes in a buffer containing a regex and this function will
306 * set search to point to the search part of the buffer and
307 * return the type of search it is (see enum above).
308 * This does modify buff.
311 * search returns the pointer to use for comparison.
312 * not returns 1 if buff started with a '!'
315 enum regex_type
filter_parse_regex(char *buff
, int len
, char **search
, int *not)
317 int type
= MATCH_FULL
;
320 if (buff
[0] == '!') {
329 for (i
= 0; i
< len
; i
++) {
330 if (buff
[i
] == '*') {
333 type
= MATCH_END_ONLY
;
335 if (type
== MATCH_END_ONLY
)
336 type
= MATCH_MIDDLE_ONLY
;
338 type
= MATCH_FRONT_ONLY
;
348 static void filter_build_regex(struct filter_pred
*pred
)
350 struct regex
*r
= &pred
->regex
;
352 enum regex_type type
= MATCH_FULL
;
355 if (pred
->op
== OP_GLOB
) {
356 type
= filter_parse_regex(r
->pattern
, r
->len
, &search
, ¬);
357 r
->len
= strlen(search
);
358 memmove(r
->pattern
, search
, r
->len
+1);
363 r
->match
= regex_match_full
;
365 case MATCH_FRONT_ONLY
:
366 r
->match
= regex_match_front
;
368 case MATCH_MIDDLE_ONLY
:
369 r
->match
= regex_match_middle
;
372 r
->match
= regex_match_end
;
385 static struct filter_pred
*
386 get_pred_parent(struct filter_pred
*pred
, struct filter_pred
*preds
,
387 int index
, enum move_type
*move
)
389 if (pred
->parent
& FILTER_PRED_IS_RIGHT
)
390 *move
= MOVE_UP_FROM_RIGHT
;
392 *move
= MOVE_UP_FROM_LEFT
;
393 pred
= &preds
[pred
->parent
& ~FILTER_PRED_IS_RIGHT
];
/* Callback invoked for every node visit during a pred-tree walk. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
408 static int walk_pred_tree(struct filter_pred
*preds
,
409 struct filter_pred
*root
,
410 filter_pred_walkcb_t cb
, void *data
)
412 struct filter_pred
*pred
= root
;
413 enum move_type move
= MOVE_DOWN
;
422 ret
= cb(move
, pred
, &err
, data
);
423 if (ret
== WALK_PRED_ABORT
)
425 if (ret
== WALK_PRED_PARENT
)
430 if (pred
->left
!= FILTER_PRED_INVALID
) {
431 pred
= &preds
[pred
->left
];
435 case MOVE_UP_FROM_LEFT
:
436 pred
= &preds
[pred
->right
];
439 case MOVE_UP_FROM_RIGHT
:
443 pred
= get_pred_parent(pred
, preds
,
456 * A series of AND or ORs where found together. Instead of
457 * climbing up and down the tree branches, an array of the
458 * ops were made in order of checks. We can just move across
459 * the array and short circuit if needed.
461 static int process_ops(struct filter_pred
*preds
,
462 struct filter_pred
*op
, void *rec
)
464 struct filter_pred
*pred
;
470 * Micro-optimization: We set type to true if op
471 * is an OR and false otherwise (AND). Then we
472 * just need to test if the match is equal to
473 * the type, and if it is, we can short circuit the
474 * rest of the checks:
476 * if ((match && op->op == OP_OR) ||
477 * (!match && op->op == OP_AND))
480 type
= op
->op
== OP_OR
;
482 for (i
= 0; i
< op
->val
; i
++) {
483 pred
= &preds
[op
->ops
[i
]];
484 if (!WARN_ON_ONCE(!pred
->fn
))
485 match
= pred
->fn(pred
, rec
);
/* Per-walk context for filter_match_preds_cb(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running match result */
	void *rec;			/* raw event record being filtered */
};
498 static int filter_match_preds_cb(enum move_type move
, struct filter_pred
*pred
,
499 int *err
, void *data
)
501 struct filter_match_preds_data
*d
= data
;
506 /* only AND and OR have children */
507 if (pred
->left
!= FILTER_PRED_INVALID
) {
508 /* If ops is set, then it was folded. */
510 return WALK_PRED_DEFAULT
;
511 /* We can treat folded ops as a leaf node */
512 d
->match
= process_ops(d
->preds
, pred
, d
->rec
);
514 if (!WARN_ON_ONCE(!pred
->fn
))
515 d
->match
= pred
->fn(pred
, d
->rec
);
518 return WALK_PRED_PARENT
;
519 case MOVE_UP_FROM_LEFT
:
521 * Check for short circuits.
523 * Optimization: !!match == (pred->op == OP_OR)
525 * if ((match && pred->op == OP_OR) ||
526 * (!match && pred->op == OP_AND))
528 if (!!d
->match
== (pred
->op
== OP_OR
))
529 return WALK_PRED_PARENT
;
531 case MOVE_UP_FROM_RIGHT
:
535 return WALK_PRED_DEFAULT
;
538 /* return 1 if event matches, 0 otherwise (discard) */
539 int filter_match_preds(struct event_filter
*filter
, void *rec
)
541 struct filter_pred
*preds
;
542 struct filter_pred
*root
;
543 struct filter_match_preds_data data
= {
544 /* match is currently meaningless */
550 /* no filter is considered a match */
554 n_preds
= filter
->n_preds
;
559 * n_preds, root and filter->preds are protect with preemption disabled.
561 root
= rcu_dereference_sched(filter
->root
);
565 data
.preds
= preds
= rcu_dereference_sched(filter
->preds
);
566 ret
= walk_pred_tree(preds
, root
, filter_match_preds_cb
, &data
);
570 EXPORT_SYMBOL_GPL(filter_match_preds
);
572 static void parse_error(struct filter_parse_state
*ps
, int err
, int pos
)
575 ps
->lasterr_pos
= pos
;
578 static void remove_filter_string(struct event_filter
*filter
)
583 kfree(filter
->filter_string
);
584 filter
->filter_string
= NULL
;
587 static int replace_filter_string(struct event_filter
*filter
,
590 kfree(filter
->filter_string
);
591 filter
->filter_string
= kstrdup(filter_string
, GFP_KERNEL
);
592 if (!filter
->filter_string
)
598 static int append_filter_string(struct event_filter
*filter
,
602 char *new_filter_string
;
604 BUG_ON(!filter
->filter_string
);
605 newlen
= strlen(filter
->filter_string
) + strlen(string
) + 1;
606 new_filter_string
= kmalloc(newlen
, GFP_KERNEL
);
607 if (!new_filter_string
)
610 strcpy(new_filter_string
, filter
->filter_string
);
611 strcat(new_filter_string
, string
);
612 kfree(filter
->filter_string
);
613 filter
->filter_string
= new_filter_string
;
618 static void append_filter_err(struct filter_parse_state
*ps
,
619 struct event_filter
*filter
)
621 int pos
= ps
->lasterr_pos
;
624 buf
= (char *)__get_free_page(GFP_TEMPORARY
);
628 append_filter_string(filter
, "\n");
629 memset(buf
, ' ', PAGE_SIZE
);
630 if (pos
> PAGE_SIZE
- 128)
633 pbuf
= &buf
[pos
] + 1;
635 sprintf(pbuf
, "\nparse_error: %s\n", err_text
[ps
->lasterr
]);
636 append_filter_string(filter
, buf
);
637 free_page((unsigned long) buf
);
640 static inline struct event_filter
*event_filter(struct ftrace_event_file
*file
)
642 if (file
->event_call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
643 return file
->event_call
->filter
;
648 /* caller must hold event_mutex */
649 void print_event_filter(struct ftrace_event_file
*file
, struct trace_seq
*s
)
651 struct event_filter
*filter
= event_filter(file
);
653 if (filter
&& filter
->filter_string
)
654 trace_seq_printf(s
, "%s\n", filter
->filter_string
);
656 trace_seq_puts(s
, "none\n");
659 void print_subsystem_event_filter(struct event_subsystem
*system
,
662 struct event_filter
*filter
;
664 mutex_lock(&event_mutex
);
665 filter
= system
->filter
;
666 if (filter
&& filter
->filter_string
)
667 trace_seq_printf(s
, "%s\n", filter
->filter_string
);
669 trace_seq_puts(s
, DEFAULT_SYS_FILTER_MESSAGE
"\n");
670 mutex_unlock(&event_mutex
);
673 static int __alloc_pred_stack(struct pred_stack
*stack
, int n_preds
)
675 stack
->preds
= kcalloc(n_preds
+ 1, sizeof(*stack
->preds
), GFP_KERNEL
);
678 stack
->index
= n_preds
;
682 static void __free_pred_stack(struct pred_stack
*stack
)
688 static int __push_pred_stack(struct pred_stack
*stack
,
689 struct filter_pred
*pred
)
691 int index
= stack
->index
;
693 if (WARN_ON(index
== 0))
696 stack
->preds
[--index
] = pred
;
697 stack
->index
= index
;
701 static struct filter_pred
*
702 __pop_pred_stack(struct pred_stack
*stack
)
704 struct filter_pred
*pred
;
705 int index
= stack
->index
;
707 pred
= stack
->preds
[index
++];
711 stack
->index
= index
;
715 static int filter_set_pred(struct event_filter
*filter
,
717 struct pred_stack
*stack
,
718 struct filter_pred
*src
)
720 struct filter_pred
*dest
= &filter
->preds
[idx
];
721 struct filter_pred
*left
;
722 struct filter_pred
*right
;
727 if (dest
->op
== OP_OR
|| dest
->op
== OP_AND
) {
728 right
= __pop_pred_stack(stack
);
729 left
= __pop_pred_stack(stack
);
733 * If both children can be folded
734 * and they are the same op as this op or a leaf,
735 * then this op can be folded.
737 if (left
->index
& FILTER_PRED_FOLD
&&
738 (left
->op
== dest
->op
||
739 left
->left
== FILTER_PRED_INVALID
) &&
740 right
->index
& FILTER_PRED_FOLD
&&
741 (right
->op
== dest
->op
||
742 right
->left
== FILTER_PRED_INVALID
))
743 dest
->index
|= FILTER_PRED_FOLD
;
745 dest
->left
= left
->index
& ~FILTER_PRED_FOLD
;
746 dest
->right
= right
->index
& ~FILTER_PRED_FOLD
;
747 left
->parent
= dest
->index
& ~FILTER_PRED_FOLD
;
748 right
->parent
= dest
->index
| FILTER_PRED_IS_RIGHT
;
751 * Make dest->left invalid to be used as a quick
752 * way to know this is a leaf node.
754 dest
->left
= FILTER_PRED_INVALID
;
756 /* All leafs allow folding the parent ops. */
757 dest
->index
|= FILTER_PRED_FOLD
;
760 return __push_pred_stack(stack
, dest
);
763 static void __free_preds(struct event_filter
*filter
)
768 for (i
= 0; i
< filter
->n_preds
; i
++)
769 kfree(filter
->preds
[i
].ops
);
770 kfree(filter
->preds
);
771 filter
->preds
= NULL
;
777 static void call_filter_disable(struct ftrace_event_call
*call
)
779 call
->flags
&= ~TRACE_EVENT_FL_FILTERED
;
782 static void filter_disable(struct ftrace_event_file
*file
)
784 struct ftrace_event_call
*call
= file
->event_call
;
786 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
787 call_filter_disable(call
);
789 file
->flags
&= ~FTRACE_EVENT_FL_FILTERED
;
792 static void __free_filter(struct event_filter
*filter
)
797 __free_preds(filter
);
798 kfree(filter
->filter_string
);
/* Public wrapper around __free_filter(). */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
807 void destroy_call_preds(struct ftrace_event_call
*call
)
809 __free_filter(call
->filter
);
813 static void destroy_file_preds(struct ftrace_event_file
*file
)
815 __free_filter(file
->filter
);
820 * Called when destroying the ftrace_event_file.
821 * The file is being freed, so we do not need to worry about
822 * the file being currently used. This is for module code removing
823 * the tracepoints from within it.
825 void destroy_preds(struct ftrace_event_file
*file
)
827 if (file
->event_call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
828 destroy_call_preds(file
->event_call
);
830 destroy_file_preds(file
);
833 static struct event_filter
*__alloc_filter(void)
835 struct event_filter
*filter
;
837 filter
= kzalloc(sizeof(*filter
), GFP_KERNEL
);
841 static int __alloc_preds(struct event_filter
*filter
, int n_preds
)
843 struct filter_pred
*pred
;
847 __free_preds(filter
);
849 filter
->preds
= kcalloc(n_preds
, sizeof(*filter
->preds
), GFP_KERNEL
);
854 filter
->a_preds
= n_preds
;
857 for (i
= 0; i
< n_preds
; i
++) {
858 pred
= &filter
->preds
[i
];
859 pred
->fn
= filter_pred_none
;
865 static inline void __remove_filter(struct ftrace_event_file
*file
)
867 struct ftrace_event_call
*call
= file
->event_call
;
869 filter_disable(file
);
870 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
871 remove_filter_string(call
->filter
);
873 remove_filter_string(file
->filter
);
876 static void filter_free_subsystem_preds(struct event_subsystem
*system
,
877 struct trace_array
*tr
)
879 struct ftrace_event_file
*file
;
880 struct ftrace_event_call
*call
;
882 list_for_each_entry(file
, &tr
->events
, list
) {
883 call
= file
->event_call
;
884 if (strcmp(call
->class->system
, system
->name
) != 0)
887 __remove_filter(file
);
891 static inline void __free_subsystem_filter(struct ftrace_event_file
*file
)
893 struct ftrace_event_call
*call
= file
->event_call
;
895 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
) {
896 __free_filter(call
->filter
);
899 __free_filter(file
->filter
);
904 static void filter_free_subsystem_filters(struct event_subsystem
*system
,
905 struct trace_array
*tr
)
907 struct ftrace_event_file
*file
;
908 struct ftrace_event_call
*call
;
910 list_for_each_entry(file
, &tr
->events
, list
) {
911 call
= file
->event_call
;
912 if (strcmp(call
->class->system
, system
->name
) != 0)
914 __free_subsystem_filter(file
);
918 static int filter_add_pred(struct filter_parse_state
*ps
,
919 struct event_filter
*filter
,
920 struct filter_pred
*pred
,
921 struct pred_stack
*stack
)
925 if (WARN_ON(filter
->n_preds
== filter
->a_preds
)) {
926 parse_error(ps
, FILT_ERR_TOO_MANY_PREDS
, 0);
930 err
= filter_set_pred(filter
, filter
->n_preds
, stack
, pred
);
939 int filter_assign_type(const char *type
)
941 if (strstr(type
, "__data_loc") && strstr(type
, "char"))
942 return FILTER_DYN_STRING
;
944 if (strchr(type
, '[') && strstr(type
, "char"))
945 return FILTER_STATIC_STRING
;
950 static bool is_function_field(struct ftrace_event_field
*field
)
952 return field
->filter_type
== FILTER_TRACE_FN
;
955 static bool is_string_field(struct ftrace_event_field
*field
)
957 return field
->filter_type
== FILTER_DYN_STRING
||
958 field
->filter_type
== FILTER_STATIC_STRING
||
959 field
->filter_type
== FILTER_PTR_STRING
;
962 static int is_legal_op(struct ftrace_event_field
*field
, int op
)
964 if (is_string_field(field
) &&
965 (op
!= OP_EQ
&& op
!= OP_NE
&& op
!= OP_GLOB
))
967 if (!is_string_field(field
) && op
== OP_GLOB
)
973 static filter_pred_fn_t
select_comparison_fn(int op
, int field_size
,
976 filter_pred_fn_t fn
= NULL
;
978 switch (field_size
) {
980 if (op
== OP_EQ
|| op
== OP_NE
)
982 else if (field_is_signed
)
983 fn
= filter_pred_s64
;
985 fn
= filter_pred_u64
;
988 if (op
== OP_EQ
|| op
== OP_NE
)
990 else if (field_is_signed
)
991 fn
= filter_pred_s32
;
993 fn
= filter_pred_u32
;
996 if (op
== OP_EQ
|| op
== OP_NE
)
998 else if (field_is_signed
)
999 fn
= filter_pred_s16
;
1001 fn
= filter_pred_u16
;
1004 if (op
== OP_EQ
|| op
== OP_NE
)
1006 else if (field_is_signed
)
1007 fn
= filter_pred_s8
;
1009 fn
= filter_pred_u8
;
1016 static int init_pred(struct filter_parse_state
*ps
,
1017 struct ftrace_event_field
*field
,
1018 struct filter_pred
*pred
)
1021 filter_pred_fn_t fn
= filter_pred_none
;
1022 unsigned long long val
;
1025 pred
->offset
= field
->offset
;
1027 if (!is_legal_op(field
, pred
->op
)) {
1028 parse_error(ps
, FILT_ERR_ILLEGAL_FIELD_OP
, 0);
1032 if (is_string_field(field
)) {
1033 filter_build_regex(pred
);
1035 if (field
->filter_type
== FILTER_STATIC_STRING
) {
1036 fn
= filter_pred_string
;
1037 pred
->regex
.field_len
= field
->size
;
1038 } else if (field
->filter_type
== FILTER_DYN_STRING
)
1039 fn
= filter_pred_strloc
;
1041 fn
= filter_pred_pchar
;
1042 } else if (is_function_field(field
)) {
1043 if (strcmp(field
->name
, "ip")) {
1044 parse_error(ps
, FILT_ERR_IP_FIELD_ONLY
, 0);
1048 if (field
->is_signed
)
1049 ret
= kstrtoll(pred
->regex
.pattern
, 0, &val
);
1051 ret
= kstrtoull(pred
->regex
.pattern
, 0, &val
);
1053 parse_error(ps
, FILT_ERR_ILLEGAL_INTVAL
, 0);
1058 fn
= select_comparison_fn(pred
->op
, field
->size
,
1061 parse_error(ps
, FILT_ERR_INVALID_OP
, 0);
1066 if (pred
->op
== OP_NE
)
1073 static void parse_init(struct filter_parse_state
*ps
,
1074 struct filter_op
*ops
,
1077 memset(ps
, '\0', sizeof(*ps
));
1079 ps
->infix
.string
= infix_string
;
1080 ps
->infix
.cnt
= strlen(infix_string
);
1083 INIT_LIST_HEAD(&ps
->opstack
);
1084 INIT_LIST_HEAD(&ps
->postfix
);
1087 static char infix_next(struct filter_parse_state
*ps
)
1091 return ps
->infix
.string
[ps
->infix
.tail
++];
1094 static char infix_peek(struct filter_parse_state
*ps
)
1096 if (ps
->infix
.tail
== strlen(ps
->infix
.string
))
1099 return ps
->infix
.string
[ps
->infix
.tail
];
1102 static void infix_advance(struct filter_parse_state
*ps
)
1108 static inline int is_precedence_lower(struct filter_parse_state
*ps
,
1111 return ps
->ops
[a
].precedence
< ps
->ops
[b
].precedence
;
1114 static inline int is_op_char(struct filter_parse_state
*ps
, char c
)
1118 for (i
= 0; strcmp(ps
->ops
[i
].string
, "OP_NONE"); i
++) {
1119 if (ps
->ops
[i
].string
[0] == c
)
1126 static int infix_get_op(struct filter_parse_state
*ps
, char firstc
)
1128 char nextc
= infix_peek(ps
);
1136 for (i
= 0; strcmp(ps
->ops
[i
].string
, "OP_NONE"); i
++) {
1137 if (!strcmp(opstr
, ps
->ops
[i
].string
)) {
1139 return ps
->ops
[i
].id
;
1145 for (i
= 0; strcmp(ps
->ops
[i
].string
, "OP_NONE"); i
++) {
1146 if (!strcmp(opstr
, ps
->ops
[i
].string
))
1147 return ps
->ops
[i
].id
;
1153 static inline void clear_operand_string(struct filter_parse_state
*ps
)
1155 memset(ps
->operand
.string
, '\0', MAX_FILTER_STR_VAL
);
1156 ps
->operand
.tail
= 0;
1159 static inline int append_operand_char(struct filter_parse_state
*ps
, char c
)
1161 if (ps
->operand
.tail
== MAX_FILTER_STR_VAL
- 1)
1164 ps
->operand
.string
[ps
->operand
.tail
++] = c
;
1169 static int filter_opstack_push(struct filter_parse_state
*ps
, int op
)
1171 struct opstack_op
*opstack_op
;
1173 opstack_op
= kmalloc(sizeof(*opstack_op
), GFP_KERNEL
);
1177 opstack_op
->op
= op
;
1178 list_add(&opstack_op
->list
, &ps
->opstack
);
1183 static int filter_opstack_empty(struct filter_parse_state
*ps
)
1185 return list_empty(&ps
->opstack
);
1188 static int filter_opstack_top(struct filter_parse_state
*ps
)
1190 struct opstack_op
*opstack_op
;
1192 if (filter_opstack_empty(ps
))
1195 opstack_op
= list_first_entry(&ps
->opstack
, struct opstack_op
, list
);
1197 return opstack_op
->op
;
1200 static int filter_opstack_pop(struct filter_parse_state
*ps
)
1202 struct opstack_op
*opstack_op
;
1205 if (filter_opstack_empty(ps
))
1208 opstack_op
= list_first_entry(&ps
->opstack
, struct opstack_op
, list
);
1209 op
= opstack_op
->op
;
1210 list_del(&opstack_op
->list
);
/* Drain and free every entry on the operator stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1223 static char *curr_operand(struct filter_parse_state
*ps
)
1225 return ps
->operand
.string
;
1228 static int postfix_append_operand(struct filter_parse_state
*ps
, char *operand
)
1230 struct postfix_elt
*elt
;
1232 elt
= kmalloc(sizeof(*elt
), GFP_KERNEL
);
1237 elt
->operand
= kstrdup(operand
, GFP_KERNEL
);
1238 if (!elt
->operand
) {
1243 list_add_tail(&elt
->list
, &ps
->postfix
);
1248 static int postfix_append_op(struct filter_parse_state
*ps
, int op
)
1250 struct postfix_elt
*elt
;
1252 elt
= kmalloc(sizeof(*elt
), GFP_KERNEL
);
1257 elt
->operand
= NULL
;
1259 list_add_tail(&elt
->list
, &ps
->postfix
);
1264 static void postfix_clear(struct filter_parse_state
*ps
)
1266 struct postfix_elt
*elt
;
1268 while (!list_empty(&ps
->postfix
)) {
1269 elt
= list_first_entry(&ps
->postfix
, struct postfix_elt
, list
);
1270 list_del(&elt
->list
);
1271 kfree(elt
->operand
);
1276 static int filter_parse(struct filter_parse_state
*ps
)
1282 while ((ch
= infix_next(ps
))) {
1294 if (is_op_char(ps
, ch
)) {
1295 op
= infix_get_op(ps
, ch
);
1296 if (op
== OP_NONE
) {
1297 parse_error(ps
, FILT_ERR_INVALID_OP
, 0);
1301 if (strlen(curr_operand(ps
))) {
1302 postfix_append_operand(ps
, curr_operand(ps
));
1303 clear_operand_string(ps
);
1306 while (!filter_opstack_empty(ps
)) {
1307 top_op
= filter_opstack_top(ps
);
1308 if (!is_precedence_lower(ps
, top_op
, op
)) {
1309 top_op
= filter_opstack_pop(ps
);
1310 postfix_append_op(ps
, top_op
);
1316 filter_opstack_push(ps
, op
);
1321 filter_opstack_push(ps
, OP_OPEN_PAREN
);
1326 if (strlen(curr_operand(ps
))) {
1327 postfix_append_operand(ps
, curr_operand(ps
));
1328 clear_operand_string(ps
);
1331 top_op
= filter_opstack_pop(ps
);
1332 while (top_op
!= OP_NONE
) {
1333 if (top_op
== OP_OPEN_PAREN
)
1335 postfix_append_op(ps
, top_op
);
1336 top_op
= filter_opstack_pop(ps
);
1338 if (top_op
== OP_NONE
) {
1339 parse_error(ps
, FILT_ERR_UNBALANCED_PAREN
, 0);
1345 if (append_operand_char(ps
, ch
)) {
1346 parse_error(ps
, FILT_ERR_OPERAND_TOO_LONG
, 0);
1351 if (strlen(curr_operand(ps
)))
1352 postfix_append_operand(ps
, curr_operand(ps
));
1354 while (!filter_opstack_empty(ps
)) {
1355 top_op
= filter_opstack_pop(ps
);
1356 if (top_op
== OP_NONE
)
1358 if (top_op
== OP_OPEN_PAREN
) {
1359 parse_error(ps
, FILT_ERR_UNBALANCED_PAREN
, 0);
1362 postfix_append_op(ps
, top_op
);
1368 static struct filter_pred
*create_pred(struct filter_parse_state
*ps
,
1369 struct ftrace_event_call
*call
,
1370 int op
, char *operand1
, char *operand2
)
1372 struct ftrace_event_field
*field
;
1373 static struct filter_pred pred
;
1375 memset(&pred
, 0, sizeof(pred
));
1378 if (op
== OP_AND
|| op
== OP_OR
)
1381 if (!operand1
|| !operand2
) {
1382 parse_error(ps
, FILT_ERR_MISSING_FIELD
, 0);
1386 field
= trace_find_event_field(call
, operand1
);
1388 parse_error(ps
, FILT_ERR_FIELD_NOT_FOUND
, 0);
1392 strcpy(pred
.regex
.pattern
, operand2
);
1393 pred
.regex
.len
= strlen(pred
.regex
.pattern
);
1395 return init_pred(ps
, field
, &pred
) ? NULL
: &pred
;
1398 static int check_preds(struct filter_parse_state
*ps
)
1400 int n_normal_preds
= 0, n_logical_preds
= 0;
1401 struct postfix_elt
*elt
;
1403 list_for_each_entry(elt
, &ps
->postfix
, list
) {
1404 if (elt
->op
== OP_NONE
)
1407 if (elt
->op
== OP_AND
|| elt
->op
== OP_OR
) {
1414 if (!n_normal_preds
|| n_logical_preds
>= n_normal_preds
) {
1415 parse_error(ps
, FILT_ERR_INVALID_FILTER
, 0);
1422 static int count_preds(struct filter_parse_state
*ps
)
1424 struct postfix_elt
*elt
;
1427 list_for_each_entry(elt
, &ps
->postfix
, list
) {
1428 if (elt
->op
== OP_NONE
)
/* Context for check_pred_tree_cb(): visit counter and its allowed max. */
struct check_pred_data {
	int count;
	int max;
};
1441 static int check_pred_tree_cb(enum move_type move
, struct filter_pred
*pred
,
1442 int *err
, void *data
)
1444 struct check_pred_data
*d
= data
;
1446 if (WARN_ON(d
->count
++ > d
->max
)) {
1448 return WALK_PRED_ABORT
;
1450 return WALK_PRED_DEFAULT
;
1454 * The tree is walked at filtering of an event. If the tree is not correctly
1455 * built, it may cause an infinite loop. Check here that the tree does
1458 static int check_pred_tree(struct event_filter
*filter
,
1459 struct filter_pred
*root
)
1461 struct check_pred_data data
= {
1463 * The max that we can hit a node is three times.
1464 * Once going down, once coming up from left, and
1465 * once coming up from right. This is more than enough
1466 * since leafs are only hit a single time.
1468 .max
= 3 * filter
->n_preds
,
1472 return walk_pred_tree(filter
->preds
, root
,
1473 check_pred_tree_cb
, &data
);
1476 static int count_leafs_cb(enum move_type move
, struct filter_pred
*pred
,
1477 int *err
, void *data
)
1481 if ((move
== MOVE_DOWN
) &&
1482 (pred
->left
== FILTER_PRED_INVALID
))
1485 return WALK_PRED_DEFAULT
;
1488 static int count_leafs(struct filter_pred
*preds
, struct filter_pred
*root
)
1492 ret
= walk_pred_tree(preds
, root
, count_leafs_cb
, &count
);
/* Context for fold_pred_cb(): root being folded plus leaf bookkeeping. */
struct fold_pred_data {
	struct filter_pred *root;
	int count;	/* leaves gathered so far */
	int children;	/* expected leaf total */
};
1503 static int fold_pred_cb(enum move_type move
, struct filter_pred
*pred
,
1504 int *err
, void *data
)
1506 struct fold_pred_data
*d
= data
;
1507 struct filter_pred
*root
= d
->root
;
1509 if (move
!= MOVE_DOWN
)
1510 return WALK_PRED_DEFAULT
;
1511 if (pred
->left
!= FILTER_PRED_INVALID
)
1512 return WALK_PRED_DEFAULT
;
1514 if (WARN_ON(d
->count
== d
->children
)) {
1516 return WALK_PRED_ABORT
;
1519 pred
->index
&= ~FILTER_PRED_FOLD
;
1520 root
->ops
[d
->count
++] = pred
->index
;
1521 return WALK_PRED_DEFAULT
;
1524 static int fold_pred(struct filter_pred
*preds
, struct filter_pred
*root
)
1526 struct fold_pred_data data
= {
1532 /* No need to keep the fold flag */
1533 root
->index
&= ~FILTER_PRED_FOLD
;
1535 /* If the root is a leaf then do nothing */
1536 if (root
->left
== FILTER_PRED_INVALID
)
1539 /* count the children */
1540 children
= count_leafs(preds
, &preds
[root
->left
]);
1541 children
+= count_leafs(preds
, &preds
[root
->right
]);
1543 root
->ops
= kcalloc(children
, sizeof(*root
->ops
), GFP_KERNEL
);
1547 root
->val
= children
;
1548 data
.children
= children
;
1549 return walk_pred_tree(preds
, root
, fold_pred_cb
, &data
);
1552 static int fold_pred_tree_cb(enum move_type move
, struct filter_pred
*pred
,
1553 int *err
, void *data
)
1555 struct filter_pred
*preds
= data
;
1557 if (move
!= MOVE_DOWN
)
1558 return WALK_PRED_DEFAULT
;
1559 if (!(pred
->index
& FILTER_PRED_FOLD
))
1560 return WALK_PRED_DEFAULT
;
1562 *err
= fold_pred(preds
, pred
);
1564 return WALK_PRED_ABORT
;
1566 /* eveyrhing below is folded, continue with parent */
1567 return WALK_PRED_PARENT
;
1571 * To optimize the processing of the ops, if we have several "ors" or
1572 * "ands" together, we can put them in an array and process them all
1573 * together speeding up the filter logic.
1575 static int fold_pred_tree(struct event_filter
*filter
,
1576 struct filter_pred
*root
)
1578 return walk_pred_tree(filter
->preds
, root
, fold_pred_tree_cb
,
1582 static int replace_preds(struct ftrace_event_call
*call
,
1583 struct event_filter
*filter
,
1584 struct filter_parse_state
*ps
,
1585 char *filter_string
,
1588 char *operand1
= NULL
, *operand2
= NULL
;
1589 struct filter_pred
*pred
;
1590 struct filter_pred
*root
;
1591 struct postfix_elt
*elt
;
1592 struct pred_stack stack
= { }; /* init to NULL */
1596 n_preds
= count_preds(ps
);
1597 if (n_preds
>= MAX_FILTER_PRED
) {
1598 parse_error(ps
, FILT_ERR_TOO_MANY_PREDS
, 0);
1602 err
= check_preds(ps
);
1607 err
= __alloc_pred_stack(&stack
, n_preds
);
1610 err
= __alloc_preds(filter
, n_preds
);
1616 list_for_each_entry(elt
, &ps
->postfix
, list
) {
1617 if (elt
->op
== OP_NONE
) {
1619 operand1
= elt
->operand
;
1621 operand2
= elt
->operand
;
1623 parse_error(ps
, FILT_ERR_TOO_MANY_OPERANDS
, 0);
1630 if (WARN_ON(n_preds
++ == MAX_FILTER_PRED
)) {
1631 parse_error(ps
, FILT_ERR_TOO_MANY_PREDS
, 0);
1636 pred
= create_pred(ps
, call
, elt
->op
, operand1
, operand2
);
1643 err
= filter_add_pred(ps
, filter
, pred
, &stack
);
1648 operand1
= operand2
= NULL
;
1652 /* We should have one item left on the stack */
1653 pred
= __pop_pred_stack(&stack
);
1656 /* This item is where we start from in matching */
1658 /* Make sure the stack is empty */
1659 pred
= __pop_pred_stack(&stack
);
1660 if (WARN_ON(pred
)) {
1662 filter
->root
= NULL
;
1665 err
= check_pred_tree(filter
, root
);
1669 /* Optimize the tree */
1670 err
= fold_pred_tree(filter
, root
);
1674 /* We don't set root until we know it works */
1676 filter
->root
= root
;
1681 __free_pred_stack(&stack
);
1685 static inline void event_set_filtered_flag(struct ftrace_event_file
*file
)
1687 struct ftrace_event_call
*call
= file
->event_call
;
1689 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
1690 call
->flags
|= TRACE_EVENT_FL_FILTERED
;
1692 file
->flags
|= FTRACE_EVENT_FL_FILTERED
;
1695 static inline void event_set_filter(struct ftrace_event_file
*file
,
1696 struct event_filter
*filter
)
1698 struct ftrace_event_call
*call
= file
->event_call
;
1700 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
1701 rcu_assign_pointer(call
->filter
, filter
);
1703 rcu_assign_pointer(file
->filter
, filter
);
1706 static inline void event_clear_filter(struct ftrace_event_file
*file
)
1708 struct ftrace_event_call
*call
= file
->event_call
;
1710 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
1711 RCU_INIT_POINTER(call
->filter
, NULL
);
1713 RCU_INIT_POINTER(file
->filter
, NULL
);
1717 event_set_no_set_filter_flag(struct ftrace_event_file
*file
)
1719 struct ftrace_event_call
*call
= file
->event_call
;
1721 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
1722 call
->flags
|= TRACE_EVENT_FL_NO_SET_FILTER
;
1724 file
->flags
|= FTRACE_EVENT_FL_NO_SET_FILTER
;
1728 event_clear_no_set_filter_flag(struct ftrace_event_file
*file
)
1730 struct ftrace_event_call
*call
= file
->event_call
;
1732 if (call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
)
1733 call
->flags
&= ~TRACE_EVENT_FL_NO_SET_FILTER
;
1735 file
->flags
&= ~FTRACE_EVENT_FL_NO_SET_FILTER
;
1739 event_no_set_filter_flag(struct ftrace_event_file
*file
)
1741 struct ftrace_event_call
*call
= file
->event_call
;
1743 if (file
->flags
& FTRACE_EVENT_FL_NO_SET_FILTER
)
1746 if ((call
->flags
& TRACE_EVENT_FL_USE_CALL_FILTER
) &&
1747 (call
->flags
& TRACE_EVENT_FL_NO_SET_FILTER
))
1753 struct filter_list
{
1754 struct list_head list
;
1755 struct event_filter
*filter
;
1758 static int replace_system_preds(struct event_subsystem
*system
,
1759 struct trace_array
*tr
,
1760 struct filter_parse_state
*ps
,
1761 char *filter_string
)
1763 struct ftrace_event_file
*file
;
1764 struct ftrace_event_call
*call
;
1765 struct filter_list
*filter_item
;
1766 struct filter_list
*tmp
;
1767 LIST_HEAD(filter_list
);
1771 list_for_each_entry(file
, &tr
->events
, list
) {
1772 call
= file
->event_call
;
1773 if (strcmp(call
->class->system
, system
->name
) != 0)
1777 * Try to see if the filter can be applied
1778 * (filter arg is ignored on dry_run)
1780 err
= replace_preds(call
, NULL
, ps
, filter_string
, true);
1782 event_set_no_set_filter_flag(file
);
1784 event_clear_no_set_filter_flag(file
);
1787 list_for_each_entry(file
, &tr
->events
, list
) {
1788 struct event_filter
*filter
;
1790 call
= file
->event_call
;
1792 if (strcmp(call
->class->system
, system
->name
) != 0)
1795 if (event_no_set_filter_flag(file
))
1798 filter_item
= kzalloc(sizeof(*filter_item
), GFP_KERNEL
);
1802 list_add_tail(&filter_item
->list
, &filter_list
);
1804 filter_item
->filter
= __alloc_filter();
1805 if (!filter_item
->filter
)
1807 filter
= filter_item
->filter
;
1809 /* Can only fail on no memory */
1810 err
= replace_filter_string(filter
, filter_string
);
1814 err
= replace_preds(call
, filter
, ps
, filter_string
, false);
1816 filter_disable(file
);
1817 parse_error(ps
, FILT_ERR_BAD_SUBSYS_FILTER
, 0);
1818 append_filter_err(ps
, filter
);
1820 event_set_filtered_flag(file
);
1822 * Regardless of if this returned an error, we still
1823 * replace the filter for the call.
1825 filter
= event_filter(file
);
1826 event_set_filter(file
, filter_item
->filter
);
1827 filter_item
->filter
= filter
;
1836 * The calls can still be using the old filters.
1837 * Do a synchronize_sched() to ensure all calls are
1838 * done with them before we free them.
1840 synchronize_sched();
1841 list_for_each_entry_safe(filter_item
, tmp
, &filter_list
, list
) {
1842 __free_filter(filter_item
->filter
);
1843 list_del(&filter_item
->list
);
1848 /* No call succeeded */
1849 list_for_each_entry_safe(filter_item
, tmp
, &filter_list
, list
) {
1850 list_del(&filter_item
->list
);
1853 parse_error(ps
, FILT_ERR_BAD_SUBSYS_FILTER
, 0);
1856 /* If any call succeeded, we still need to sync */
1858 synchronize_sched();
1859 list_for_each_entry_safe(filter_item
, tmp
, &filter_list
, list
) {
1860 __free_filter(filter_item
->filter
);
1861 list_del(&filter_item
->list
);
1867 static int create_filter_start(char *filter_str
, bool set_str
,
1868 struct filter_parse_state
**psp
,
1869 struct event_filter
**filterp
)
1871 struct event_filter
*filter
;
1872 struct filter_parse_state
*ps
= NULL
;
1875 WARN_ON_ONCE(*psp
|| *filterp
);
1877 /* allocate everything, and if any fails, free all and fail */
1878 filter
= __alloc_filter();
1879 if (filter
&& set_str
)
1880 err
= replace_filter_string(filter
, filter_str
);
1882 ps
= kzalloc(sizeof(*ps
), GFP_KERNEL
);
1884 if (!filter
|| !ps
|| err
) {
1886 __free_filter(filter
);
1890 /* we're committed to creating a new filter */
1894 parse_init(ps
, filter_ops
, filter_str
);
1895 err
= filter_parse(ps
);
1897 append_filter_err(ps
, filter
);
/* Release the parse state allocated by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (ps) {
		filter_opstack_clear(ps);
		postfix_clear(ps);
		kfree(ps);
	}
}
1911 * create_filter - create a filter for a ftrace_event_call
1912 * @call: ftrace_event_call to create a filter for
1913 * @filter_str: filter string
1914 * @set_str: remember @filter_str and enable detailed error in filter
1915 * @filterp: out param for created filter (always updated on return)
1917 * Creates a filter for @call with @filter_str. If @set_str is %true,
1918 * @filter_str is copied and recorded in the new filter.
1920 * On success, returns 0 and *@filterp points to the new filter. On
1921 * failure, returns -errno and *@filterp may point to %NULL or to a new
1922 * filter. In the latter case, the returned filter contains error
1923 * information if @set_str is %true and the caller is responsible for
1926 static int create_filter(struct ftrace_event_call
*call
,
1927 char *filter_str
, bool set_str
,
1928 struct event_filter
**filterp
)
1930 struct event_filter
*filter
= NULL
;
1931 struct filter_parse_state
*ps
= NULL
;
1934 err
= create_filter_start(filter_str
, set_str
, &ps
, &filter
);
1936 err
= replace_preds(call
, filter
, ps
, filter_str
, false);
1938 append_filter_err(ps
, filter
);
1940 create_filter_finish(ps
);
1946 int create_event_filter(struct ftrace_event_call
*call
,
1947 char *filter_str
, bool set_str
,
1948 struct event_filter
**filterp
)
1950 return create_filter(call
, filter_str
, set_str
, filterp
);
1954 * create_system_filter - create a filter for an event_subsystem
1955 * @system: event_subsystem to create a filter for
1956 * @filter_str: filter string
1957 * @filterp: out param for created filter (always updated on return)
1959 * Identical to create_filter() except that it creates a subsystem filter
1960 * and always remembers @filter_str.
1962 static int create_system_filter(struct event_subsystem
*system
,
1963 struct trace_array
*tr
,
1964 char *filter_str
, struct event_filter
**filterp
)
1966 struct event_filter
*filter
= NULL
;
1967 struct filter_parse_state
*ps
= NULL
;
1970 err
= create_filter_start(filter_str
, true, &ps
, &filter
);
1972 err
= replace_system_preds(system
, tr
, ps
, filter_str
);
1974 /* System filters just show a default message */
1975 kfree(filter
->filter_string
);
1976 filter
->filter_string
= NULL
;
1978 append_filter_err(ps
, filter
);
1981 create_filter_finish(ps
);
1987 /* caller must hold event_mutex */
1988 int apply_event_filter(struct ftrace_event_file
*file
, char *filter_string
)
1990 struct ftrace_event_call
*call
= file
->event_call
;
1991 struct event_filter
*filter
;
1994 if (!strcmp(strstrip(filter_string
), "0")) {
1995 filter_disable(file
);
1996 filter
= event_filter(file
);
2001 event_clear_filter(file
);
2003 /* Make sure the filter is not being used */
2004 synchronize_sched();
2005 __free_filter(filter
);
2010 err
= create_filter(call
, filter_string
, true, &filter
);
2013 * Always swap the call filter with the new filter
2014 * even if there was an error. If there was an error
2015 * in the filter, we disable the filter and show the error
2019 struct event_filter
*tmp
;
2021 tmp
= event_filter(file
);
2023 event_set_filtered_flag(file
);
2025 filter_disable(file
);
2027 event_set_filter(file
, filter
);
2030 /* Make sure the call is done with the filter */
2031 synchronize_sched();
2039 int apply_subsystem_event_filter(struct ftrace_subsystem_dir
*dir
,
2040 char *filter_string
)
2042 struct event_subsystem
*system
= dir
->subsystem
;
2043 struct trace_array
*tr
= dir
->tr
;
2044 struct event_filter
*filter
;
2047 mutex_lock(&event_mutex
);
2049 /* Make sure the system still has events */
2050 if (!dir
->nr_events
) {
2055 if (!strcmp(strstrip(filter_string
), "0")) {
2056 filter_free_subsystem_preds(system
, tr
);
2057 remove_filter_string(system
->filter
);
2058 filter
= system
->filter
;
2059 system
->filter
= NULL
;
2060 /* Ensure all filters are no longer used */
2061 synchronize_sched();
2062 filter_free_subsystem_filters(system
, tr
);
2063 __free_filter(filter
);
2067 err
= create_system_filter(system
, tr
, filter_string
, &filter
);
2070 * No event actually uses the system filter
2071 * we can free it without synchronize_sched().
2073 __free_filter(system
->filter
);
2074 system
->filter
= filter
;
2077 mutex_unlock(&event_mutex
);
2082 #ifdef CONFIG_PERF_EVENTS
2084 void ftrace_profile_free_filter(struct perf_event
*event
)
2086 struct event_filter
*filter
= event
->filter
;
2088 event
->filter
= NULL
;
2089 __free_filter(filter
);
/*
 * State threaded through the pred-tree walk that installs function
 * filters; the first_* flags request a reset on the first apply only
 * (they are consumed in __ftrace_function_set_filter()).
 */
struct function_filter_data {
	struct ftrace_ops *ops;
	int first_filter;
	int first_notrace;
};
2098 #ifdef CONFIG_FUNCTION_TRACER
2100 ftrace_function_filter_re(char *buf
, int len
, int *count
)
2102 char *str
, *sep
, **re
;
2104 str
= kstrndup(buf
, len
, GFP_KERNEL
);
2109 * The argv_split function takes white space
2110 * as a separator, so convert ',' into spaces.
2112 while ((sep
= strchr(str
, ',')))
2115 re
= argv_split(GFP_KERNEL
, str
, count
);
/* Route one regexp to ftrace's filter or notrace list as requested. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	int ret;

	if (filter)
		ret = ftrace_set_filter(ops, re, len, reset);
	else
		ret = ftrace_set_notrace(ops, re, len, reset);

	return ret;
}
2133 static int __ftrace_function_set_filter(int filter
, char *buf
, int len
,
2134 struct function_filter_data
*data
)
2136 int i
, re_cnt
, ret
= -EINVAL
;
2140 reset
= filter
? &data
->first_filter
: &data
->first_notrace
;
2143 * The 'ip' field could have multiple filters set, separated
2144 * either by space or comma. We first cut the filter and apply
2145 * all pieces separatelly.
2147 re
= ftrace_function_filter_re(buf
, len
, &re_cnt
);
2151 for (i
= 0; i
< re_cnt
; i
++) {
2152 ret
= ftrace_function_set_regexp(data
->ops
, filter
, *reset
,
2153 re
[i
], strlen(re
[i
]));
2165 static int ftrace_function_check_pred(struct filter_pred
*pred
, int leaf
)
2167 struct ftrace_event_field
*field
= pred
->field
;
2171 * Check the leaf predicate for function trace, verify:
2172 * - only '==' and '!=' is used
2173 * - the 'ip' field is used
2175 if ((pred
->op
!= OP_EQ
) && (pred
->op
!= OP_NE
))
2178 if (strcmp(field
->name
, "ip"))
2182 * Check the non leaf predicate for function trace, verify:
2183 * - only '||' is used
2185 if (pred
->op
!= OP_OR
)
2192 static int ftrace_function_set_filter_cb(enum move_type move
,
2193 struct filter_pred
*pred
,
2194 int *err
, void *data
)
2196 /* Checking the node is valid for function trace. */
2197 if ((move
!= MOVE_DOWN
) ||
2198 (pred
->left
!= FILTER_PRED_INVALID
)) {
2199 *err
= ftrace_function_check_pred(pred
, 0);
2201 *err
= ftrace_function_check_pred(pred
, 1);
2203 return WALK_PRED_ABORT
;
2205 *err
= __ftrace_function_set_filter(pred
->op
== OP_EQ
,
2206 pred
->regex
.pattern
,
2211 return (*err
) ? WALK_PRED_ABORT
: WALK_PRED_DEFAULT
;
2214 static int ftrace_function_set_filter(struct perf_event
*event
,
2215 struct event_filter
*filter
)
2217 struct function_filter_data data
= {
2220 .ops
= &event
->ftrace_ops
,
2223 return walk_pred_tree(filter
->preds
, filter
->root
,
2224 ftrace_function_set_filter_cb
, &data
);
2227 static int ftrace_function_set_filter(struct perf_event
*event
,
2228 struct event_filter
*filter
)
2232 #endif /* CONFIG_FUNCTION_TRACER */
2234 int ftrace_profile_set_filter(struct perf_event
*event
, int event_id
,
2238 struct event_filter
*filter
;
2239 struct ftrace_event_call
*call
;
2241 mutex_lock(&event_mutex
);
2243 call
= event
->tp_event
;
2253 err
= create_filter(call
, filter_str
, false, &filter
);
2257 if (ftrace_event_is_function(call
))
2258 err
= ftrace_function_set_filter(event
, filter
);
2260 event
->filter
= filter
;
2263 if (err
|| ftrace_event_is_function(call
))
2264 __free_filter(filter
);
2267 mutex_unlock(&event_mutex
);
2272 #endif /* CONFIG_PERF_EVENTS */
2274 #ifdef CONFIG_FTRACE_STARTUP_TEST
2276 #include <linux/types.h>
2277 #include <linux/tracepoint.h>
2279 #define CREATE_TRACE_POINTS
2280 #include "trace_events_filter_test.h"
2282 #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
2285 .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
2286 .e = ve, .f = vf, .g = vg, .h = vh }, \
2288 .not_visited = nvisit, \
2293 static struct test_filter_data_t
{
2295 struct ftrace_raw_ftrace_test_filter rec
;
2298 } test_filter_data
[] = {
2299 #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
2300 "e == 1 && f == 1 && g == 1 && h == 1"
2301 DATA_REC(YES
, 1, 1, 1, 1, 1, 1, 1, 1, ""),
2302 DATA_REC(NO
, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
2303 DATA_REC(NO
, 1, 1, 1, 1, 1, 1, 1, 0, ""),
2305 #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
2306 "e == 1 || f == 1 || g == 1 || h == 1"
2307 DATA_REC(NO
, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2308 DATA_REC(YES
, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2309 DATA_REC(YES
, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
2311 #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
2312 "(e == 1 || f == 1) && (g == 1 || h == 1)"
2313 DATA_REC(NO
, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
2314 DATA_REC(YES
, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2315 DATA_REC(YES
, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
2316 DATA_REC(NO
, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
2318 #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
2319 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2320 DATA_REC(YES
, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
2321 DATA_REC(YES
, 0, 0, 0, 0, 0, 0, 1, 1, ""),
2322 DATA_REC(NO
, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2324 #define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
2325 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2326 DATA_REC(YES
, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
2327 DATA_REC(NO
, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2328 DATA_REC(YES
, 1, 1, 1, 1, 1, 0, 1, 1, ""),
2330 #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
2331 "(e == 1 || f == 1)) && (g == 1 || h == 1)"
2332 DATA_REC(YES
, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
2333 DATA_REC(NO
, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2334 DATA_REC(YES
, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
2336 #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
2337 "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
2338 DATA_REC(YES
, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
2339 DATA_REC(NO
, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2340 DATA_REC(NO
, 1, 0, 1, 0, 1, 0, 1, 0, ""),
2342 #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
2343 "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
2344 DATA_REC(YES
, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
2345 DATA_REC(YES
, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2346 DATA_REC(YES
, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
2354 #define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
/* Set to 1 by test_pred_visited_fn() whenever a watched leaf is evaluated. */
static int test_pred_visited;
2358 static int test_pred_visited_fn(struct filter_pred
*pred
, void *event
)
2360 struct ftrace_event_field
*field
= pred
->field
;
2362 test_pred_visited
= 1;
2363 printk(KERN_INFO
"\npred visited %s\n", field
->name
);
2367 static int test_walk_pred_cb(enum move_type move
, struct filter_pred
*pred
,
2368 int *err
, void *data
)
2370 char *fields
= data
;
2372 if ((move
== MOVE_DOWN
) &&
2373 (pred
->left
== FILTER_PRED_INVALID
)) {
2374 struct ftrace_event_field
*field
= pred
->field
;
2377 WARN(1, "all leafs should have field defined");
2378 return WALK_PRED_DEFAULT
;
2380 if (!strchr(fields
, *field
->name
))
2381 return WALK_PRED_DEFAULT
;
2384 pred
->fn
= test_pred_visited_fn
;
2386 return WALK_PRED_DEFAULT
;
2389 static __init
int ftrace_test_event_filter(void)
2393 printk(KERN_INFO
"Testing ftrace filter: ");
2395 for (i
= 0; i
< DATA_CNT
; i
++) {
2396 struct event_filter
*filter
= NULL
;
2397 struct test_filter_data_t
*d
= &test_filter_data
[i
];
2400 err
= create_filter(&event_ftrace_test_filter
, d
->filter
,
2404 "Failed to get filter for '%s', err %d\n",
2406 __free_filter(filter
);
2411 * The preemption disabling is not really needed for self
2412 * tests, but the rcu dereference will complain without it.
2415 if (*d
->not_visited
)
2416 walk_pred_tree(filter
->preds
, filter
->root
,
2420 test_pred_visited
= 0;
2421 err
= filter_match_preds(filter
, &d
->rec
);
2424 __free_filter(filter
);
2426 if (test_pred_visited
) {
2428 "Failed, unwanted pred visited for filter %s\n",
2433 if (err
!= d
->match
) {
2435 "Failed to match filter '%s', expected %d\n",
2436 d
->filter
, d
->match
);
2442 printk(KERN_CONT
"OK\n");
2447 late_initcall(ftrace_test_event_filter
);
2449 #endif /* CONFIG_FTRACE_STARTUP_TEST */