usb: musb: fix compilation breakage introduced by de47725
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / trace / trace_events_filter.c
blob816d3d074979306713836d9447382cb641aecb54
1 /*
2 * trace_events_filter - generic event filtering
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
27 #include "trace.h"
28 #include "trace_output.h"
/* Identifiers for every operator the filter grammar understands. */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_NONE,
	OP_OPEN_PAREN,
};

/* One entry per operator: id, the token text, and its binding strength. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/*
 * Operator table used by the infix->postfix conversion. Higher
 * precedence binds tighter; OP_NONE terminates table scans and
 * OP_OPEN_PAREN is only ever pushed on the operator stack.
 */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
/* Parse-error codes; each indexes the matching string in err_text[]. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
};

/* Human-readable messages, kept in the same order as the enum above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
};
95 struct opstack_op {
96 int op;
97 struct list_head list;
100 struct postfix_elt {
101 int op;
102 char *operand;
103 struct list_head list;
106 struct filter_parse_state {
107 struct filter_op *ops;
108 struct list_head opstack;
109 struct list_head postfix;
110 int lasterr;
111 int lasterr_pos;
113 struct {
114 char *string;
115 unsigned int cnt;
116 unsigned int tail;
117 } infix;
119 struct {
120 char string[MAX_FILTER_STR_VAL];
121 int pos;
122 unsigned int tail;
123 } operand;
126 struct pred_stack {
127 struct filter_pred **preds;
128 int index;
131 #define DEFINE_COMPARISON_PRED(type) \
132 static int filter_pred_##type(struct filter_pred *pred, void *event) \
134 type *addr = (type *)(event + pred->offset); \
135 type val = (type)pred->val; \
136 int match = 0; \
138 switch (pred->op) { \
139 case OP_LT: \
140 match = (*addr < val); \
141 break; \
142 case OP_LE: \
143 match = (*addr <= val); \
144 break; \
145 case OP_GT: \
146 match = (*addr > val); \
147 break; \
148 case OP_GE: \
149 match = (*addr >= val); \
150 break; \
151 default: \
152 break; \
155 return match; \
158 #define DEFINE_EQUALITY_PRED(size) \
159 static int filter_pred_##size(struct filter_pred *pred, void *event) \
161 u##size *addr = (u##size *)(event + pred->offset); \
162 u##size val = (u##size)pred->val; \
163 int match; \
165 match = (val == *addr) ^ pred->not; \
167 return match; \
170 DEFINE_COMPARISON_PRED(s64);
171 DEFINE_COMPARISON_PRED(u64);
172 DEFINE_COMPARISON_PRED(s32);
173 DEFINE_COMPARISON_PRED(u32);
174 DEFINE_COMPARISON_PRED(s16);
175 DEFINE_COMPARISON_PRED(u16);
176 DEFINE_COMPARISON_PRED(s8);
177 DEFINE_COMPARISON_PRED(u8);
179 DEFINE_EQUALITY_PRED(64);
180 DEFINE_EQUALITY_PRED(32);
181 DEFINE_EQUALITY_PRED(16);
182 DEFINE_EQUALITY_PRED(8);
184 /* Filter predicate for fixed sized arrays of characters */
185 static int filter_pred_string(struct filter_pred *pred, void *event)
187 char *addr = (char *)(event + pred->offset);
188 int cmp, match;
190 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
192 match = cmp ^ pred->not;
194 return match;
197 /* Filter predicate for char * pointers */
198 static int filter_pred_pchar(struct filter_pred *pred, void *event)
200 char **addr = (char **)(event + pred->offset);
201 int cmp, match;
202 int len = strlen(*addr) + 1; /* including tailing '\0' */
204 cmp = pred->regex.match(*addr, &pred->regex, len);
206 match = cmp ^ pred->not;
208 return match;
212 * Filter predicate for dynamic sized arrays of characters.
213 * These are implemented through a list of strings at the end
214 * of the entry.
215 * Also each of these strings have a field in the entry which
216 * contains its offset from the beginning of the entry.
217 * We have then first to get this field, dereference it
218 * and add it to the address of the entry, and at last we have
219 * the address of the string.
221 static int filter_pred_strloc(struct filter_pred *pred, void *event)
223 u32 str_item = *(u32 *)(event + pred->offset);
224 int str_loc = str_item & 0xffff;
225 int str_len = str_item >> 16;
226 char *addr = (char *)(event + str_loc);
227 int cmp, match;
229 cmp = pred->regex.match(addr, &pred->regex, str_len);
231 match = cmp ^ pred->not;
233 return match;
236 static int filter_pred_none(struct filter_pred *pred, void *event)
238 return 0;
242 * regex_match_foo - Basic regex callbacks
244 * @str: the string to be searched
245 * @r: the regex structure containing the pattern string
246 * @len: the length of the string to be searched (including '\0')
248 * Note:
249 * - @str might not be NULL-terminated if it's of type DYN_STRING
250 * or STATIC_STRING
253 static int regex_match_full(char *str, struct regex *r, int len)
255 if (strncmp(str, r->pattern, len) == 0)
256 return 1;
257 return 0;
260 static int regex_match_front(char *str, struct regex *r, int len)
262 if (strncmp(str, r->pattern, r->len) == 0)
263 return 1;
264 return 0;
267 static int regex_match_middle(char *str, struct regex *r, int len)
269 if (strnstr(str, r->pattern, len))
270 return 1;
271 return 0;
274 static int regex_match_end(char *str, struct regex *r, int len)
276 int strlen = len - 1;
278 if (strlen >= r->len &&
279 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
280 return 1;
281 return 0;
285 * filter_parse_regex - parse a basic regex
286 * @buff: the raw regex
287 * @len: length of the regex
288 * @search: will point to the beginning of the string to compare
289 * @not: tell whether the match will have to be inverted
291 * This passes in a buffer containing a regex and this function will
292 * set search to point to the search part of the buffer and
293 * return the type of search it is (see enum above).
294 * This does modify buff.
296 * Returns enum type.
297 * search returns the pointer to use for comparison.
298 * not returns 1 if buff started with a '!'
299 * 0 otherwise.
301 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
303 int type = MATCH_FULL;
304 int i;
306 if (buff[0] == '!') {
307 *not = 1;
308 buff++;
309 len--;
310 } else
311 *not = 0;
313 *search = buff;
315 for (i = 0; i < len; i++) {
316 if (buff[i] == '*') {
317 if (!i) {
318 *search = buff + 1;
319 type = MATCH_END_ONLY;
320 } else {
321 if (type == MATCH_END_ONLY)
322 type = MATCH_MIDDLE_ONLY;
323 else
324 type = MATCH_FRONT_ONLY;
325 buff[i] = 0;
326 break;
331 return type;
334 static void filter_build_regex(struct filter_pred *pred)
336 struct regex *r = &pred->regex;
337 char *search;
338 enum regex_type type = MATCH_FULL;
339 int not = 0;
341 if (pred->op == OP_GLOB) {
342 type = filter_parse_regex(r->pattern, r->len, &search, &not);
343 r->len = strlen(search);
344 memmove(r->pattern, search, r->len+1);
347 switch (type) {
348 case MATCH_FULL:
349 r->match = regex_match_full;
350 break;
351 case MATCH_FRONT_ONLY:
352 r->match = regex_match_front;
353 break;
354 case MATCH_MIDDLE_ONLY:
355 r->match = regex_match_middle;
356 break;
357 case MATCH_END_ONLY:
358 r->match = regex_match_end;
359 break;
362 pred->not ^= not;
365 enum move_type {
366 MOVE_DOWN,
367 MOVE_UP_FROM_LEFT,
368 MOVE_UP_FROM_RIGHT
371 static struct filter_pred *
372 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
373 int index, enum move_type *move)
375 if (pred->parent & FILTER_PRED_IS_RIGHT)
376 *move = MOVE_UP_FROM_RIGHT;
377 else
378 *move = MOVE_UP_FROM_LEFT;
379 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
381 return pred;
384 enum walk_return {
385 WALK_PRED_ABORT,
386 WALK_PRED_PARENT,
387 WALK_PRED_DEFAULT,
390 typedef int (*filter_pred_walkcb_t) (enum move_type move,
391 struct filter_pred *pred,
392 int *err, void *data);
394 static int walk_pred_tree(struct filter_pred *preds,
395 struct filter_pred *root,
396 filter_pred_walkcb_t cb, void *data)
398 struct filter_pred *pred = root;
399 enum move_type move = MOVE_DOWN;
400 int done = 0;
402 if (!preds)
403 return -EINVAL;
405 do {
406 int err = 0, ret;
408 ret = cb(move, pred, &err, data);
409 if (ret == WALK_PRED_ABORT)
410 return err;
411 if (ret == WALK_PRED_PARENT)
412 goto get_parent;
414 switch (move) {
415 case MOVE_DOWN:
416 if (pred->left != FILTER_PRED_INVALID) {
417 pred = &preds[pred->left];
418 continue;
420 goto get_parent;
421 case MOVE_UP_FROM_LEFT:
422 pred = &preds[pred->right];
423 move = MOVE_DOWN;
424 continue;
425 case MOVE_UP_FROM_RIGHT:
426 get_parent:
427 if (pred == root)
428 break;
429 pred = get_pred_parent(pred, preds,
430 pred->parent,
431 &move);
432 continue;
434 done = 1;
435 } while (!done);
437 /* We are fine. */
438 return 0;
442 * A series of AND or ORs where found together. Instead of
443 * climbing up and down the tree branches, an array of the
444 * ops were made in order of checks. We can just move across
445 * the array and short circuit if needed.
447 static int process_ops(struct filter_pred *preds,
448 struct filter_pred *op, void *rec)
450 struct filter_pred *pred;
451 int match = 0;
452 int type;
453 int i;
456 * Micro-optimization: We set type to true if op
457 * is an OR and false otherwise (AND). Then we
458 * just need to test if the match is equal to
459 * the type, and if it is, we can short circuit the
460 * rest of the checks:
462 * if ((match && op->op == OP_OR) ||
463 * (!match && op->op == OP_AND))
464 * return match;
466 type = op->op == OP_OR;
468 for (i = 0; i < op->val; i++) {
469 pred = &preds[op->ops[i]];
470 if (!WARN_ON_ONCE(!pred->fn))
471 match = pred->fn(pred, rec);
472 if (!!match == type)
473 return match;
475 return match;
478 struct filter_match_preds_data {
479 struct filter_pred *preds;
480 int match;
481 void *rec;
484 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
485 int *err, void *data)
487 struct filter_match_preds_data *d = data;
489 *err = 0;
490 switch (move) {
491 case MOVE_DOWN:
492 /* only AND and OR have children */
493 if (pred->left != FILTER_PRED_INVALID) {
494 /* If ops is set, then it was folded. */
495 if (!pred->ops)
496 return WALK_PRED_DEFAULT;
497 /* We can treat folded ops as a leaf node */
498 d->match = process_ops(d->preds, pred, d->rec);
499 } else {
500 if (!WARN_ON_ONCE(!pred->fn))
501 d->match = pred->fn(pred, d->rec);
504 return WALK_PRED_PARENT;
505 case MOVE_UP_FROM_LEFT:
507 * Check for short circuits.
509 * Optimization: !!match == (pred->op == OP_OR)
510 * is the same as:
511 * if ((match && pred->op == OP_OR) ||
512 * (!match && pred->op == OP_AND))
514 if (!!d->match == (pred->op == OP_OR))
515 return WALK_PRED_PARENT;
516 break;
517 case MOVE_UP_FROM_RIGHT:
518 break;
521 return WALK_PRED_DEFAULT;
524 /* return 1 if event matches, 0 otherwise (discard) */
525 int filter_match_preds(struct event_filter *filter, void *rec)
527 struct filter_pred *preds;
528 struct filter_pred *root;
529 struct filter_match_preds_data data = {
530 /* match is currently meaningless */
531 .match = -1,
532 .rec = rec,
534 int n_preds, ret;
536 /* no filter is considered a match */
537 if (!filter)
538 return 1;
540 n_preds = filter->n_preds;
541 if (!n_preds)
542 return 1;
545 * n_preds, root and filter->preds are protect with preemption disabled.
547 root = rcu_dereference_sched(filter->root);
548 if (!root)
549 return 1;
551 data.preds = preds = rcu_dereference_sched(filter->preds);
552 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
553 WARN_ON(ret);
554 return data.match;
556 EXPORT_SYMBOL_GPL(filter_match_preds);
558 static void parse_error(struct filter_parse_state *ps, int err, int pos)
560 ps->lasterr = err;
561 ps->lasterr_pos = pos;
564 static void remove_filter_string(struct event_filter *filter)
566 if (!filter)
567 return;
569 kfree(filter->filter_string);
570 filter->filter_string = NULL;
573 static int replace_filter_string(struct event_filter *filter,
574 char *filter_string)
576 kfree(filter->filter_string);
577 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
578 if (!filter->filter_string)
579 return -ENOMEM;
581 return 0;
584 static int append_filter_string(struct event_filter *filter,
585 char *string)
587 int newlen;
588 char *new_filter_string;
590 BUG_ON(!filter->filter_string);
591 newlen = strlen(filter->filter_string) + strlen(string) + 1;
592 new_filter_string = kmalloc(newlen, GFP_KERNEL);
593 if (!new_filter_string)
594 return -ENOMEM;
596 strcpy(new_filter_string, filter->filter_string);
597 strcat(new_filter_string, string);
598 kfree(filter->filter_string);
599 filter->filter_string = new_filter_string;
601 return 0;
604 static void append_filter_err(struct filter_parse_state *ps,
605 struct event_filter *filter)
607 int pos = ps->lasterr_pos;
608 char *buf, *pbuf;
610 buf = (char *)__get_free_page(GFP_TEMPORARY);
611 if (!buf)
612 return;
614 append_filter_string(filter, "\n");
615 memset(buf, ' ', PAGE_SIZE);
616 if (pos > PAGE_SIZE - 128)
617 pos = 0;
618 buf[pos] = '^';
619 pbuf = &buf[pos] + 1;
621 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
622 append_filter_string(filter, buf);
623 free_page((unsigned long) buf);
626 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
628 struct event_filter *filter;
630 mutex_lock(&event_mutex);
631 filter = call->filter;
632 if (filter && filter->filter_string)
633 trace_seq_printf(s, "%s\n", filter->filter_string);
634 else
635 trace_seq_printf(s, "none\n");
636 mutex_unlock(&event_mutex);
639 void print_subsystem_event_filter(struct event_subsystem *system,
640 struct trace_seq *s)
642 struct event_filter *filter;
644 mutex_lock(&event_mutex);
645 filter = system->filter;
646 if (filter && filter->filter_string)
647 trace_seq_printf(s, "%s\n", filter->filter_string);
648 else
649 trace_seq_printf(s, "none\n");
650 mutex_unlock(&event_mutex);
653 static struct ftrace_event_field *
654 __find_event_field(struct list_head *head, char *name)
656 struct ftrace_event_field *field;
658 list_for_each_entry(field, head, link) {
659 if (!strcmp(field->name, name))
660 return field;
663 return NULL;
666 static struct ftrace_event_field *
667 find_event_field(struct ftrace_event_call *call, char *name)
669 struct ftrace_event_field *field;
670 struct list_head *head;
672 field = __find_event_field(&ftrace_common_fields, name);
673 if (field)
674 return field;
676 head = trace_get_fields(call);
677 return __find_event_field(head, name);
680 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
682 stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
683 if (!stack->preds)
684 return -ENOMEM;
685 stack->index = n_preds;
686 return 0;
689 static void __free_pred_stack(struct pred_stack *stack)
691 kfree(stack->preds);
692 stack->index = 0;
695 static int __push_pred_stack(struct pred_stack *stack,
696 struct filter_pred *pred)
698 int index = stack->index;
700 if (WARN_ON(index == 0))
701 return -ENOSPC;
703 stack->preds[--index] = pred;
704 stack->index = index;
705 return 0;
708 static struct filter_pred *
709 __pop_pred_stack(struct pred_stack *stack)
711 struct filter_pred *pred;
712 int index = stack->index;
714 pred = stack->preds[index++];
715 if (!pred)
716 return NULL;
718 stack->index = index;
719 return pred;
722 static int filter_set_pred(struct event_filter *filter,
723 int idx,
724 struct pred_stack *stack,
725 struct filter_pred *src)
727 struct filter_pred *dest = &filter->preds[idx];
728 struct filter_pred *left;
729 struct filter_pred *right;
731 *dest = *src;
732 dest->index = idx;
734 if (dest->op == OP_OR || dest->op == OP_AND) {
735 right = __pop_pred_stack(stack);
736 left = __pop_pred_stack(stack);
737 if (!left || !right)
738 return -EINVAL;
740 * If both children can be folded
741 * and they are the same op as this op or a leaf,
742 * then this op can be folded.
744 if (left->index & FILTER_PRED_FOLD &&
745 (left->op == dest->op ||
746 left->left == FILTER_PRED_INVALID) &&
747 right->index & FILTER_PRED_FOLD &&
748 (right->op == dest->op ||
749 right->left == FILTER_PRED_INVALID))
750 dest->index |= FILTER_PRED_FOLD;
752 dest->left = left->index & ~FILTER_PRED_FOLD;
753 dest->right = right->index & ~FILTER_PRED_FOLD;
754 left->parent = dest->index & ~FILTER_PRED_FOLD;
755 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
756 } else {
758 * Make dest->left invalid to be used as a quick
759 * way to know this is a leaf node.
761 dest->left = FILTER_PRED_INVALID;
763 /* All leafs allow folding the parent ops. */
764 dest->index |= FILTER_PRED_FOLD;
767 return __push_pred_stack(stack, dest);
770 static void __free_preds(struct event_filter *filter)
772 if (filter->preds) {
773 kfree(filter->preds);
774 filter->preds = NULL;
776 filter->a_preds = 0;
777 filter->n_preds = 0;
780 static void filter_disable(struct ftrace_event_call *call)
782 call->flags &= ~TRACE_EVENT_FL_FILTERED;
785 static void __free_filter(struct event_filter *filter)
787 if (!filter)
788 return;
790 __free_preds(filter);
791 kfree(filter->filter_string);
792 kfree(filter);
796 * Called when destroying the ftrace_event_call.
797 * The call is being freed, so we do not need to worry about
798 * the call being currently used. This is for module code removing
799 * the tracepoints from within it.
801 void destroy_preds(struct ftrace_event_call *call)
803 __free_filter(call->filter);
804 call->filter = NULL;
807 static struct event_filter *__alloc_filter(void)
809 struct event_filter *filter;
811 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
812 return filter;
815 static int __alloc_preds(struct event_filter *filter, int n_preds)
817 struct filter_pred *pred;
818 int i;
820 if (filter->preds)
821 __free_preds(filter);
823 filter->preds =
824 kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
826 if (!filter->preds)
827 return -ENOMEM;
829 filter->a_preds = n_preds;
830 filter->n_preds = 0;
832 for (i = 0; i < n_preds; i++) {
833 pred = &filter->preds[i];
834 pred->fn = filter_pred_none;
837 return 0;
840 static void filter_free_subsystem_preds(struct event_subsystem *system)
842 struct ftrace_event_call *call;
844 list_for_each_entry(call, &ftrace_events, list) {
845 if (strcmp(call->class->system, system->name) != 0)
846 continue;
848 filter_disable(call);
849 remove_filter_string(call->filter);
853 static void filter_free_subsystem_filters(struct event_subsystem *system)
855 struct ftrace_event_call *call;
857 list_for_each_entry(call, &ftrace_events, list) {
858 if (strcmp(call->class->system, system->name) != 0)
859 continue;
860 __free_filter(call->filter);
861 call->filter = NULL;
865 static int filter_add_pred(struct filter_parse_state *ps,
866 struct event_filter *filter,
867 struct filter_pred *pred,
868 struct pred_stack *stack)
870 int err;
872 if (WARN_ON(filter->n_preds == filter->a_preds)) {
873 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
874 return -ENOSPC;
877 err = filter_set_pred(filter, filter->n_preds, stack, pred);
878 if (err)
879 return err;
881 filter->n_preds++;
883 return 0;
886 int filter_assign_type(const char *type)
888 if (strstr(type, "__data_loc") && strstr(type, "char"))
889 return FILTER_DYN_STRING;
891 if (strchr(type, '[') && strstr(type, "char"))
892 return FILTER_STATIC_STRING;
894 return FILTER_OTHER;
897 static bool is_string_field(struct ftrace_event_field *field)
899 return field->filter_type == FILTER_DYN_STRING ||
900 field->filter_type == FILTER_STATIC_STRING ||
901 field->filter_type == FILTER_PTR_STRING;
904 static int is_legal_op(struct ftrace_event_field *field, int op)
906 if (is_string_field(field) &&
907 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
908 return 0;
909 if (!is_string_field(field) && op == OP_GLOB)
910 return 0;
912 return 1;
915 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
916 int field_is_signed)
918 filter_pred_fn_t fn = NULL;
920 switch (field_size) {
921 case 8:
922 if (op == OP_EQ || op == OP_NE)
923 fn = filter_pred_64;
924 else if (field_is_signed)
925 fn = filter_pred_s64;
926 else
927 fn = filter_pred_u64;
928 break;
929 case 4:
930 if (op == OP_EQ || op == OP_NE)
931 fn = filter_pred_32;
932 else if (field_is_signed)
933 fn = filter_pred_s32;
934 else
935 fn = filter_pred_u32;
936 break;
937 case 2:
938 if (op == OP_EQ || op == OP_NE)
939 fn = filter_pred_16;
940 else if (field_is_signed)
941 fn = filter_pred_s16;
942 else
943 fn = filter_pred_u16;
944 break;
945 case 1:
946 if (op == OP_EQ || op == OP_NE)
947 fn = filter_pred_8;
948 else if (field_is_signed)
949 fn = filter_pred_s8;
950 else
951 fn = filter_pred_u8;
952 break;
955 return fn;
958 static int init_pred(struct filter_parse_state *ps,
959 struct ftrace_event_field *field,
960 struct filter_pred *pred)
963 filter_pred_fn_t fn = filter_pred_none;
964 unsigned long long val;
965 int ret;
967 pred->offset = field->offset;
969 if (!is_legal_op(field, pred->op)) {
970 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
971 return -EINVAL;
974 if (is_string_field(field)) {
975 filter_build_regex(pred);
977 if (field->filter_type == FILTER_STATIC_STRING) {
978 fn = filter_pred_string;
979 pred->regex.field_len = field->size;
980 } else if (field->filter_type == FILTER_DYN_STRING)
981 fn = filter_pred_strloc;
982 else
983 fn = filter_pred_pchar;
984 } else {
985 if (field->is_signed)
986 ret = strict_strtoll(pred->regex.pattern, 0, &val);
987 else
988 ret = strict_strtoull(pred->regex.pattern, 0, &val);
989 if (ret) {
990 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
991 return -EINVAL;
993 pred->val = val;
995 fn = select_comparison_fn(pred->op, field->size,
996 field->is_signed);
997 if (!fn) {
998 parse_error(ps, FILT_ERR_INVALID_OP, 0);
999 return -EINVAL;
1003 if (pred->op == OP_NE)
1004 pred->not = 1;
1006 pred->fn = fn;
1007 return 0;
1010 static void parse_init(struct filter_parse_state *ps,
1011 struct filter_op *ops,
1012 char *infix_string)
1014 memset(ps, '\0', sizeof(*ps));
1016 ps->infix.string = infix_string;
1017 ps->infix.cnt = strlen(infix_string);
1018 ps->ops = ops;
1020 INIT_LIST_HEAD(&ps->opstack);
1021 INIT_LIST_HEAD(&ps->postfix);
1024 static char infix_next(struct filter_parse_state *ps)
1026 ps->infix.cnt--;
1028 return ps->infix.string[ps->infix.tail++];
1031 static char infix_peek(struct filter_parse_state *ps)
1033 if (ps->infix.tail == strlen(ps->infix.string))
1034 return 0;
1036 return ps->infix.string[ps->infix.tail];
1039 static void infix_advance(struct filter_parse_state *ps)
1041 ps->infix.cnt--;
1042 ps->infix.tail++;
1045 static inline int is_precedence_lower(struct filter_parse_state *ps,
1046 int a, int b)
1048 return ps->ops[a].precedence < ps->ops[b].precedence;
1051 static inline int is_op_char(struct filter_parse_state *ps, char c)
1053 int i;
1055 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1056 if (ps->ops[i].string[0] == c)
1057 return 1;
1060 return 0;
1063 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1065 char nextc = infix_peek(ps);
1066 char opstr[3];
1067 int i;
1069 opstr[0] = firstc;
1070 opstr[1] = nextc;
1071 opstr[2] = '\0';
1073 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1074 if (!strcmp(opstr, ps->ops[i].string)) {
1075 infix_advance(ps);
1076 return ps->ops[i].id;
1080 opstr[1] = '\0';
1082 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1083 if (!strcmp(opstr, ps->ops[i].string))
1084 return ps->ops[i].id;
1087 return OP_NONE;
1090 static inline void clear_operand_string(struct filter_parse_state *ps)
1092 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1093 ps->operand.tail = 0;
1096 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1098 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1099 return -EINVAL;
1101 ps->operand.string[ps->operand.tail++] = c;
1103 return 0;
1106 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1108 struct opstack_op *opstack_op;
1110 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1111 if (!opstack_op)
1112 return -ENOMEM;
1114 opstack_op->op = op;
1115 list_add(&opstack_op->list, &ps->opstack);
1117 return 0;
1120 static int filter_opstack_empty(struct filter_parse_state *ps)
1122 return list_empty(&ps->opstack);
1125 static int filter_opstack_top(struct filter_parse_state *ps)
1127 struct opstack_op *opstack_op;
1129 if (filter_opstack_empty(ps))
1130 return OP_NONE;
1132 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1134 return opstack_op->op;
1137 static int filter_opstack_pop(struct filter_parse_state *ps)
1139 struct opstack_op *opstack_op;
1140 int op;
1142 if (filter_opstack_empty(ps))
1143 return OP_NONE;
1145 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1146 op = opstack_op->op;
1147 list_del(&opstack_op->list);
1149 kfree(opstack_op);
1151 return op;
1154 static void filter_opstack_clear(struct filter_parse_state *ps)
1156 while (!filter_opstack_empty(ps))
1157 filter_opstack_pop(ps);
1160 static char *curr_operand(struct filter_parse_state *ps)
1162 return ps->operand.string;
1165 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1167 struct postfix_elt *elt;
1169 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1170 if (!elt)
1171 return -ENOMEM;
1173 elt->op = OP_NONE;
1174 elt->operand = kstrdup(operand, GFP_KERNEL);
1175 if (!elt->operand) {
1176 kfree(elt);
1177 return -ENOMEM;
1180 list_add_tail(&elt->list, &ps->postfix);
1182 return 0;
1185 static int postfix_append_op(struct filter_parse_state *ps, int op)
1187 struct postfix_elt *elt;
1189 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1190 if (!elt)
1191 return -ENOMEM;
1193 elt->op = op;
1194 elt->operand = NULL;
1196 list_add_tail(&elt->list, &ps->postfix);
1198 return 0;
1201 static void postfix_clear(struct filter_parse_state *ps)
1203 struct postfix_elt *elt;
1205 while (!list_empty(&ps->postfix)) {
1206 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1207 list_del(&elt->list);
1208 kfree(elt->operand);
1209 kfree(elt);
1213 static int filter_parse(struct filter_parse_state *ps)
1215 int in_string = 0;
1216 int op, top_op;
1217 char ch;
1219 while ((ch = infix_next(ps))) {
1220 if (ch == '"') {
1221 in_string ^= 1;
1222 continue;
1225 if (in_string)
1226 goto parse_operand;
1228 if (isspace(ch))
1229 continue;
1231 if (is_op_char(ps, ch)) {
1232 op = infix_get_op(ps, ch);
1233 if (op == OP_NONE) {
1234 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1235 return -EINVAL;
1238 if (strlen(curr_operand(ps))) {
1239 postfix_append_operand(ps, curr_operand(ps));
1240 clear_operand_string(ps);
1243 while (!filter_opstack_empty(ps)) {
1244 top_op = filter_opstack_top(ps);
1245 if (!is_precedence_lower(ps, top_op, op)) {
1246 top_op = filter_opstack_pop(ps);
1247 postfix_append_op(ps, top_op);
1248 continue;
1250 break;
1253 filter_opstack_push(ps, op);
1254 continue;
1257 if (ch == '(') {
1258 filter_opstack_push(ps, OP_OPEN_PAREN);
1259 continue;
1262 if (ch == ')') {
1263 if (strlen(curr_operand(ps))) {
1264 postfix_append_operand(ps, curr_operand(ps));
1265 clear_operand_string(ps);
1268 top_op = filter_opstack_pop(ps);
1269 while (top_op != OP_NONE) {
1270 if (top_op == OP_OPEN_PAREN)
1271 break;
1272 postfix_append_op(ps, top_op);
1273 top_op = filter_opstack_pop(ps);
1275 if (top_op == OP_NONE) {
1276 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1277 return -EINVAL;
1279 continue;
1281 parse_operand:
1282 if (append_operand_char(ps, ch)) {
1283 parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1284 return -EINVAL;
1288 if (strlen(curr_operand(ps)))
1289 postfix_append_operand(ps, curr_operand(ps));
1291 while (!filter_opstack_empty(ps)) {
1292 top_op = filter_opstack_pop(ps);
1293 if (top_op == OP_NONE)
1294 break;
1295 if (top_op == OP_OPEN_PAREN) {
1296 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1297 return -EINVAL;
1299 postfix_append_op(ps, top_op);
1302 return 0;
1305 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1306 struct ftrace_event_call *call,
1307 int op, char *operand1, char *operand2)
1309 struct ftrace_event_field *field;
1310 static struct filter_pred pred;
1312 memset(&pred, 0, sizeof(pred));
1313 pred.op = op;
1315 if (op == OP_AND || op == OP_OR)
1316 return &pred;
1318 if (!operand1 || !operand2) {
1319 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1320 return NULL;
1323 field = find_event_field(call, operand1);
1324 if (!field) {
1325 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1326 return NULL;
1329 strcpy(pred.regex.pattern, operand2);
1330 pred.regex.len = strlen(pred.regex.pattern);
1332 #ifdef CONFIG_FTRACE_STARTUP_TEST
1333 pred.field = field;
1334 #endif
1335 return init_pred(ps, field, &pred) ? NULL : &pred;
1338 static int check_preds(struct filter_parse_state *ps)
1340 int n_normal_preds = 0, n_logical_preds = 0;
1341 struct postfix_elt *elt;
1343 list_for_each_entry(elt, &ps->postfix, list) {
1344 if (elt->op == OP_NONE)
1345 continue;
1347 if (elt->op == OP_AND || elt->op == OP_OR) {
1348 n_logical_preds++;
1349 continue;
1351 n_normal_preds++;
1354 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1355 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1356 return -EINVAL;
1359 return 0;
1362 static int count_preds(struct filter_parse_state *ps)
1364 struct postfix_elt *elt;
1365 int n_preds = 0;
1367 list_for_each_entry(elt, &ps->postfix, list) {
1368 if (elt->op == OP_NONE)
1369 continue;
1370 n_preds++;
1373 return n_preds;
1376 struct check_pred_data {
1377 int count;
1378 int max;
1381 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1382 int *err, void *data)
1384 struct check_pred_data *d = data;
1386 if (WARN_ON(d->count++ > d->max)) {
1387 *err = -EINVAL;
1388 return WALK_PRED_ABORT;
1390 return WALK_PRED_DEFAULT;
1394 * The tree is walked at filtering of an event. If the tree is not correctly
1395 * built, it may cause an infinite loop. Check here that the tree does
1396 * indeed terminate.
1398 static int check_pred_tree(struct event_filter *filter,
1399 struct filter_pred *root)
1401 struct check_pred_data data = {
1403 * The max that we can hit a node is three times.
1404 * Once going down, once coming up from left, and
1405 * once coming up from right. This is more than enough
1406 * since leafs are only hit a single time.
1408 .max = 3 * filter->n_preds,
1409 .count = 0,
1412 return walk_pred_tree(filter->preds, root,
1413 check_pred_tree_cb, &data);
1416 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1417 int *err, void *data)
1419 int *count = data;
1421 if ((move == MOVE_DOWN) &&
1422 (pred->left == FILTER_PRED_INVALID))
1423 (*count)++;
1425 return WALK_PRED_DEFAULT;
1428 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1430 int count = 0, ret;
1432 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1433 WARN_ON(ret);
1434 return count;
/* State for fold_pred_cb(): collects the indices of the leaves below
 * a folded root into that root's ops[] array. */
struct fold_pred_data {
	struct filter_pred *root;	/* folded node receiving the leaf indices */
	int count;			/* next free slot in root->ops[] */
	int children;			/* capacity of root->ops[] */
};
/*
 * walk_pred_tree() callback for fold_pred(): for every leaf under the
 * folded root, clear its fold flag and record its index in the root's
 * ops[] array.  Writing more than @children entries would overrun the
 * array, so that case aborts with -EINVAL.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* only handle each node once, on the way down */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	/* skip inner nodes; only leaves go into ops[] */
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	/* more leaves than count_leafs() promised — would overflow ops[] */
	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	/* index doubles as storage for the fold flag; strip it first */
	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1464 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1466 struct fold_pred_data data = {
1467 .root = root,
1468 .count = 0,
1470 int children;
1472 /* No need to keep the fold flag */
1473 root->index &= ~FILTER_PRED_FOLD;
1475 /* If the root is a leaf then do nothing */
1476 if (root->left == FILTER_PRED_INVALID)
1477 return 0;
1479 /* count the children */
1480 children = count_leafs(preds, &preds[root->left]);
1481 children += count_leafs(preds, &preds[root->right]);
1483 root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
1484 if (!root->ops)
1485 return -ENOMEM;
1487 root->val = children;
1488 data.children = children;
1489 return walk_pred_tree(preds, root, fold_pred_cb, &data);
/*
 * walk_pred_tree() callback for fold_pred_tree(): when a node marked
 * with FILTER_PRED_FOLD is reached on the way down, fold its whole
 * subtree via fold_pred() and then skip that subtree by resuming the
 * walk at the parent.
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 *
 * Walks the whole tree and folds every subtree whose root carries the
 * FILTER_PRED_FOLD flag (see fold_pred_tree_cb/fold_pred).
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
1522 static int replace_preds(struct ftrace_event_call *call,
1523 struct event_filter *filter,
1524 struct filter_parse_state *ps,
1525 char *filter_string,
1526 bool dry_run)
1528 char *operand1 = NULL, *operand2 = NULL;
1529 struct filter_pred *pred;
1530 struct filter_pred *root;
1531 struct postfix_elt *elt;
1532 struct pred_stack stack = { }; /* init to NULL */
1533 int err;
1534 int n_preds = 0;
1536 n_preds = count_preds(ps);
1537 if (n_preds >= MAX_FILTER_PRED) {
1538 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1539 return -ENOSPC;
1542 err = check_preds(ps);
1543 if (err)
1544 return err;
1546 if (!dry_run) {
1547 err = __alloc_pred_stack(&stack, n_preds);
1548 if (err)
1549 return err;
1550 err = __alloc_preds(filter, n_preds);
1551 if (err)
1552 goto fail;
1555 n_preds = 0;
1556 list_for_each_entry(elt, &ps->postfix, list) {
1557 if (elt->op == OP_NONE) {
1558 if (!operand1)
1559 operand1 = elt->operand;
1560 else if (!operand2)
1561 operand2 = elt->operand;
1562 else {
1563 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1564 err = -EINVAL;
1565 goto fail;
1567 continue;
1570 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1571 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1572 err = -ENOSPC;
1573 goto fail;
1576 pred = create_pred(ps, call, elt->op, operand1, operand2);
1577 if (!pred) {
1578 err = -EINVAL;
1579 goto fail;
1582 if (!dry_run) {
1583 err = filter_add_pred(ps, filter, pred, &stack);
1584 if (err)
1585 goto fail;
1588 operand1 = operand2 = NULL;
1591 if (!dry_run) {
1592 /* We should have one item left on the stack */
1593 pred = __pop_pred_stack(&stack);
1594 if (!pred)
1595 return -EINVAL;
1596 /* This item is where we start from in matching */
1597 root = pred;
1598 /* Make sure the stack is empty */
1599 pred = __pop_pred_stack(&stack);
1600 if (WARN_ON(pred)) {
1601 err = -EINVAL;
1602 filter->root = NULL;
1603 goto fail;
1605 err = check_pred_tree(filter, root);
1606 if (err)
1607 goto fail;
1609 /* Optimize the tree */
1610 err = fold_pred_tree(filter, root);
1611 if (err)
1612 goto fail;
1614 /* We don't set root until we know it works */
1615 barrier();
1616 filter->root = root;
1619 err = 0;
1620 fail:
1621 __free_pred_stack(&stack);
1622 return err;
/* List node used by replace_system_preds() to track the filters being
 * swapped in/out across all events of a subsystem, so the displaced
 * ones can be freed together after a synchronize_sched(). */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
/*
 * Apply @filter_string to every event call belonging to @system.
 *
 * Two passes: first a dry run over all matching events to check the
 * filter parses against each of them, then a real pass that builds a
 * fresh filter per event and swaps it in, keeping the displaced filter
 * on a local list so it can be freed after a synchronize_sched().
 *
 * Returns 0 on success, -EINVAL if no event accepted the filter,
 * -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct event_subsystem *system,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;	/* stays true only if no matching call completed */
	int err;

	list_for_each_entry(call, &ftrace_events, list) {

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 *  (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			goto fail;
	}

	list_for_each_entry(call, &ftrace_events, list) {
		struct event_filter *filter;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(call);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			call->flags |= TRACE_EVENT_FL_FILTERED;
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.  The list item then
		 * holds the event's old filter for later freeing.
		 */
		filter = call->filter;
		call->filter = filter_item->filter;
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
/*
 * Set or clear the filter of a single event from @filter_string,
 * under event_mutex.
 *
 * The string "0" (after strstrip) disables and frees the current
 * filter.  Otherwise a new filter is parsed and built, and it is
 * swapped into call->filter even on error (disabled, carrying the
 * error string so it can be reported via the filter file); the old
 * filter is freed only after a synchronize_sched() so no call is
 * still using it.
 *
 * Returns 0 on success or a negative errno.
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
	struct filter_parse_state *ps;
	struct event_filter *filter;
	struct event_filter *tmp;
	int err = 0;

	mutex_lock(&event_mutex);

	if (!strcmp(strstrip(filter_string), "0")) {
		/* "0" means: drop the filter entirely */
		filter_disable(call);
		filter = call->filter;
		if (!filter)
			goto out_unlock;
		call->filter = NULL;
		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);
		goto out_unlock;
	}

	err = -ENOMEM;
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out_unlock;

	filter = __alloc_filter();
	if (!filter) {
		kfree(ps);
		goto out_unlock;
	}

	replace_filter_string(filter, filter_string);

	parse_init(ps, filter_ops, filter_string);
	err = filter_parse(ps);
	if (err) {
		append_filter_err(ps, filter);
		goto out;
	}

	err = replace_preds(call, filter, ps, filter_string, false);
	if (err) {
		filter_disable(call);
		append_filter_err(ps, filter);
	} else
		call->flags |= TRACE_EVENT_FL_FILTERED;
out:
	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	tmp = call->filter;
	call->filter = filter;
	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		__free_filter(tmp);
	}
	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
/*
 * Set or clear a subsystem-wide filter from @filter_string, under
 * event_mutex.
 *
 * "0" clears the subsystem filter and all per-event filters derived
 * from it (with a synchronize_sched() before freeing).  Otherwise the
 * subsystem's own filter is replaced with one carrying the new string
 * (for error reporting), the string is parsed, and on success
 * replace_system_preds() applies it to every event in the subsystem.
 *
 * Returns 0 on success or a negative errno (-ENODEV if the subsystem
 * no longer has any events).
 */
int apply_subsystem_event_filter(struct event_subsystem *system,
				 char *filter_string)
{
	struct filter_parse_state *ps;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!system->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		/* "0" means: drop the subsystem filter and its children */
		filter_free_subsystem_preds(system);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system);
		__free_filter(filter);
		goto out_unlock;
	}

	err = -ENOMEM;
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out_unlock;

	filter = __alloc_filter();
	if (!filter)
		goto out;

	replace_filter_string(filter, filter_string);
	/*
	 * No event actually uses the system filter
	 * we can free it without synchronize_sched().
	 */
	__free_filter(system->filter);
	system->filter = filter;

	parse_init(ps, filter_ops, filter_string);
	err = filter_parse(ps);
	if (err) {
		append_filter_err(ps, system->filter);
		goto out;
	}

	err = replace_system_preds(system, ps, filter_string);
	if (err)
		append_filter_err(ps, system->filter);

out:
	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1865 #ifdef CONFIG_PERF_EVENTS
/*
 * Detach and free the filter attached to a perf event.
 * NOTE(review): relies on __free_filter() tolerating a NULL argument
 * when the event has no filter — confirm against its definition.
 */
void ftrace_profile_free_filter(struct perf_event *event)
{
	struct event_filter *filter = event->filter;

	event->filter = NULL;
	__free_filter(filter);
}
1875 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1876 char *filter_str)
1878 int err;
1879 struct event_filter *filter;
1880 struct filter_parse_state *ps;
1881 struct ftrace_event_call *call;
1883 mutex_lock(&event_mutex);
1885 call = event->tp_event;
1887 err = -EINVAL;
1888 if (!call)
1889 goto out_unlock;
1891 err = -EEXIST;
1892 if (event->filter)
1893 goto out_unlock;
1895 filter = __alloc_filter();
1896 if (!filter) {
1897 err = PTR_ERR(filter);
1898 goto out_unlock;
1901 err = -ENOMEM;
1902 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1903 if (!ps)
1904 goto free_filter;
1906 parse_init(ps, filter_ops, filter_str);
1907 err = filter_parse(ps);
1908 if (err)
1909 goto free_ps;
1911 err = replace_preds(call, filter, ps, filter_str, false);
1912 if (!err)
1913 event->filter = filter;
1915 free_ps:
1916 filter_opstack_clear(ps);
1917 postfix_clear(ps);
1918 kfree(ps);
1920 free_filter:
1921 if (err)
1922 __free_filter(filter);
1924 out_unlock:
1925 mutex_unlock(&event_mutex);
1927 return err;
1930 #endif /* CONFIG_PERF_EVENTS */
1932 #ifdef CONFIG_FTRACE_STARTUP_TEST
1934 #include <linux/types.h>
1935 #include <linux/tracepoint.h>
1937 #define CREATE_TRACE_POINTS
1938 #include "trace_events_filter_test.h"
1940 static int test_get_filter(char *filter_str, struct ftrace_event_call *call,
1941 struct event_filter **pfilter)
1943 struct event_filter *filter;
1944 struct filter_parse_state *ps;
1945 int err = -ENOMEM;
1947 filter = __alloc_filter();
1948 if (!filter)
1949 goto out;
1951 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1952 if (!ps)
1953 goto free_filter;
1955 parse_init(ps, filter_ops, filter_str);
1956 err = filter_parse(ps);
1957 if (err)
1958 goto free_ps;
1960 err = replace_preds(call, filter, ps, filter_str, false);
1961 if (!err)
1962 *pfilter = filter;
1964 free_ps:
1965 filter_opstack_clear(ps);
1966 postfix_clear(ps);
1967 kfree(ps);
1969 free_filter:
1970 if (err)
1971 __free_filter(filter);
1973 out:
1974 return err;
/*
 * Self-test fixtures: each DATA_REC() entry pairs the FILTER string in
 * effect at that point with one sample record (fields a..h), the
 * expected match result, and the set of fields whose predicates must
 * NOT be visited (short-circuited) when matching that record.
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO 0

static struct test_filter_data_t {
	char *filter;					/* filter string under test */
	struct ftrace_raw_ftrace_test_filter rec;	/* sample event record */
	int match;					/* expected filter_match_preds() result */
	char *not_visited;				/* field letters that must be skipped */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
#undef FILTER
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* number of entries in test_filter_data[] */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
/* Set by test_pred_visited_fn() so the self test can detect that a
 * predicate that should have been short-circuited was evaluated. */
static int test_pred_visited;

/*
 * Replacement pred->fn installed on predicates listed in not_visited:
 * records the visit and reports which field's predicate fired.
 */
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}
2062 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2063 int *err, void *data)
2065 char *fields = data;
2067 if ((move == MOVE_DOWN) &&
2068 (pred->left == FILTER_PRED_INVALID)) {
2069 struct ftrace_event_field *field = pred->field;
2071 if (!field) {
2072 WARN(1, "all leafs should have field defined");
2073 return WALK_PRED_DEFAULT;
2075 if (!strchr(fields, *field->name))
2076 return WALK_PRED_DEFAULT;
2078 WARN_ON(!pred->fn);
2079 pred->fn = test_pred_visited_fn;
2081 return WALK_PRED_DEFAULT;
/*
 * Startup self test: for each fixture in test_filter_data[], build the
 * filter, mark the predicates that must not be visited, run the match,
 * and verify both the match result and that no marked predicate fired.
 * Stops at the first failure; prints "OK" only if the loop ran to
 * completion (i == DATA_CNT).
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = test_get_filter(d->filter, &event_ftrace_test_filter,
				      &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* only reached DATA_CNT if no break fired above */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}

late_initcall(ftrace_test_event_filter);
2143 #endif /* CONFIG_FTRACE_STARTUP_TEST */