cifs: clean up check_rfc1002_header
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / trace / trace_events_filter.c
blob256764ecccd66fca5045cf0a53bb9d04d8e7f106
1 /*
2 * trace_events_filter - generic event filtering
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
27 #include "trace.h"
28 #include "trace_output.h"
/* Identifiers for every operator the filter language understands. */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_NONE,
	OP_OPEN_PAREN,
};

/* One entry of the operator table: id, textual form, parse precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/*
 * Operator table used by the parser.  Higher precedence binds tighter.
 * The "OP_NONE" entry acts as the table terminator for the lookup loops
 * in is_op_char()/infix_get_op().
 */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
/*
 * Parse/compile error codes.  Each value indexes the matching
 * human-readable message in err_text[] below -- keep the two in sync.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
};

/* Messages reported back to userspace via append_filter_err(). */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
};
/* An operator pushed on the parser's operator stack. */
struct opstack_op {
	int op;
	struct list_head list;
};

/*
 * One element of the postfix (RPN) form of the filter: either an
 * operator (op != OP_NONE) or an operand string (op == OP_NONE).
 */
struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};

/* State carried through infix->postfix conversion and error reporting. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* shunting-yard operator stack */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* last FILT_ERR_* code */
	int lasterr_pos;		/* position of the error in the input */

	/* the raw infix input string and the read cursor into it */
	struct {
		char *string;
		unsigned int cnt;
		unsigned int tail;
	} infix;

	/* the operand currently being accumulated, bounded buffer */
	struct {
		char string[MAX_FILTER_STR_VAL];
		int pos;
		unsigned int tail;
	} operand;
};

/* Simple LIFO of predicate pointers used while wiring up the pred tree. */
struct pred_stack {
	struct filter_pred **preds;
	int index;
};
/*
 * Generate filter_pred_<type>() implementing the relational operators
 * (<, <=, >, >=) on a fixed-size integer field of the given C type.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}

/*
 * Generate filter_pred_<size>() implementing ==/!= on a field of the
 * given bit width; pred->not inverts the result for OP_NE.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
184 /* Filter predicate for fixed sized arrays of characters */
185 static int filter_pred_string(struct filter_pred *pred, void *event)
187 char *addr = (char *)(event + pred->offset);
188 int cmp, match;
190 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
192 match = cmp ^ pred->not;
194 return match;
197 /* Filter predicate for char * pointers */
198 static int filter_pred_pchar(struct filter_pred *pred, void *event)
200 char **addr = (char **)(event + pred->offset);
201 int cmp, match;
202 int len = strlen(*addr) + 1; /* including tailing '\0' */
204 cmp = pred->regex.match(*addr, &pred->regex, len);
206 match = cmp ^ pred->not;
208 return match;
212 * Filter predicate for dynamic sized arrays of characters.
213 * These are implemented through a list of strings at the end
214 * of the entry.
215 * Also each of these strings have a field in the entry which
216 * contains its offset from the beginning of the entry.
217 * We have then first to get this field, dereference it
218 * and add it to the address of the entry, and at last we have
219 * the address of the string.
221 static int filter_pred_strloc(struct filter_pred *pred, void *event)
223 u32 str_item = *(u32 *)(event + pred->offset);
224 int str_loc = str_item & 0xffff;
225 int str_len = str_item >> 16;
226 char *addr = (char *)(event + str_loc);
227 int cmp, match;
229 cmp = pred->regex.match(addr, &pred->regex, str_len);
231 match = cmp ^ pred->not;
233 return match;
236 static int filter_pred_none(struct filter_pred *pred, void *event)
238 return 0;
242 * regex_match_foo - Basic regex callbacks
244 * @str: the string to be searched
245 * @r: the regex structure containing the pattern string
246 * @len: the length of the string to be searched (including '\0')
248 * Note:
249 * - @str might not be NULL-terminated if it's of type DYN_STRING
250 * or STATIC_STRING
253 static int regex_match_full(char *str, struct regex *r, int len)
255 if (strncmp(str, r->pattern, len) == 0)
256 return 1;
257 return 0;
260 static int regex_match_front(char *str, struct regex *r, int len)
262 if (strncmp(str, r->pattern, r->len) == 0)
263 return 1;
264 return 0;
267 static int regex_match_middle(char *str, struct regex *r, int len)
269 if (strnstr(str, r->pattern, len))
270 return 1;
271 return 0;
274 static int regex_match_end(char *str, struct regex *r, int len)
276 int strlen = len - 1;
278 if (strlen >= r->len &&
279 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
280 return 1;
281 return 0;
/**
 * filter_parse_regex - parse a basic regex
 * @buff:   the raw regex
 * @len:    length of the regex
 * @search: will point to the beginning of the string to compare
 * @not:    tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *      0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' inverts the match and is stripped from the pattern */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match the end of the string */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/* '*...*' means match anywhere in the middle */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				/* terminate the pattern at the trailing '*' */
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
/*
 * Compile pred->regex: for OP_GLOB, strip any '!'/'*' decoration from
 * the stored pattern and pick the match callback for the glob type;
 * other string ops always use a full compare.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* keep only the search portion in the stored pattern */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* a '!' in the glob flips the (possibly already set) negation */
	pred->not ^= not;
}
/* Direction of travel while iteratively walking the predicate tree. */
enum move_type {
	MOVE_DOWN,
	MOVE_UP_FROM_LEFT,
	MOVE_UP_FROM_RIGHT
};

/*
 * Step from @pred to its parent, recording in @move whether we arrived
 * from the left or the right child (encoded in FILTER_PRED_IS_RIGHT).
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 * if ((match && op->op == OP_OR) ||
	 *     (!match && op->op == OP_AND))
	 *	return match;
	 */
	type = op->op == OP_OR;

	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		match = pred->fn(pred, rec);
		if (!!match == type)
			return match;
	}

	return match;
}
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	int match = -1;
	enum move_type move = MOVE_DOWN;
	struct filter_pred *preds;
	struct filter_pred *pred;
	struct filter_pred *root;
	int n_preds;
	int done = 0;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;

	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	preds = rcu_dereference_sched(filter->preds);
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	pred = root;

	/* match is currently meaningless */
	match = -1;

	/* iterative (stackless) in-order walk of the predicate tree */
	do {
		switch (move) {
		case MOVE_DOWN:
			/* only AND and OR have children */
			if (pred->left != FILTER_PRED_INVALID) {
				/* If ops is set, then it was folded. */
				if (!pred->ops) {
					/* keep going to down the left side */
					pred = &preds[pred->left];
					continue;
				}
				/* We can treat folded ops as a leaf node */
				match = process_ops(preds, pred, rec);
			} else
				match = pred->fn(pred, rec);
			/* If this pred is the only pred */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		case MOVE_UP_FROM_LEFT:
			/*
			 * Check for short circuits.
			 *
			 * Optimization: !!match == (pred->op == OP_OR)
			 *   is the same as:
			 * if ((match && pred->op == OP_OR) ||
			 *     (!match && pred->op == OP_AND))
			 */
			if (!!match == (pred->op == OP_OR)) {
				if (pred == root)
					break;
				pred = get_pred_parent(pred, preds,
						       pred->parent, &move);
				continue;
			}
			/* now go down the right side of the tree. */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
			/* We finished this equation. */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		}
		done = 1;
	} while (!done);

	return match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
/* Record the error code and input position for later reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}

/* Drop the saved textual form of the filter; NULL filter is a no-op. */
static void remove_filter_string(struct event_filter *filter)
{
	if (!filter)
		return;

	kfree(filter->filter_string);
	filter->filter_string = NULL;
}

/*
 * Replace the saved filter text with a copy of @filter_string.
 * Returns -ENOMEM (with the old string already freed) on failure.
 */
static int replace_filter_string(struct event_filter *filter,
				 char *filter_string)
{
	kfree(filter->filter_string);
	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
	if (!filter->filter_string)
		return -ENOMEM;

	return 0;
}

/* Append @string to the saved filter text, reallocating the buffer. */
static int append_filter_string(struct event_filter *filter,
				char *string)
{
	int newlen;
	char *new_filter_string;

	BUG_ON(!filter->filter_string);
	newlen = strlen(filter->filter_string) + strlen(string) + 1;
	new_filter_string = kmalloc(newlen, GFP_KERNEL);
	if (!new_filter_string)
		return -ENOMEM;

	strcpy(new_filter_string, filter->filter_string);
	strcat(new_filter_string, string);
	kfree(filter->filter_string);
	filter->filter_string = new_filter_string;

	return 0;
}
/*
 * Append a '^' position marker plus the textual parse error to the
 * saved filter string, so userspace reading the filter file can see
 * what went wrong and where.  Best effort: silently returns if the
 * scratch page cannot be allocated.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/* keep the caret and the message within the single page */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
/* Show an event's current filter string ("none" when unset). */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = call->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_printf(s, "none\n");
	mutex_unlock(&event_mutex);
}

/* Show a subsystem's current filter string ("none" when unset). */
void print_subsystem_event_filter(struct event_subsystem *system,
				  struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = system->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_printf(s, "none\n");
	mutex_unlock(&event_mutex);
}
/* Linear search of a field list by name; NULL when not found. */
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

/* Look @name up among the common fields first, then the event's own. */
static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}
/* Free a predicate and the field name it owns; NULL is a no-op. */
static void filter_free_pred(struct filter_pred *pred)
{
	if (!pred)
		return;

	kfree(pred->field_name);
	kfree(pred);
}

/* Reset a predicate slot for reuse without freeing the slot itself. */
static void filter_clear_pred(struct filter_pred *pred)
{
	kfree(pred->field_name);
	pred->field_name = NULL;
	pred->regex.len = 0;
}
647 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
649 stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
650 if (!stack->preds)
651 return -ENOMEM;
652 stack->index = n_preds;
653 return 0;
656 static void __free_pred_stack(struct pred_stack *stack)
658 kfree(stack->preds);
659 stack->index = 0;
662 static int __push_pred_stack(struct pred_stack *stack,
663 struct filter_pred *pred)
665 int index = stack->index;
667 if (WARN_ON(index == 0))
668 return -ENOSPC;
670 stack->preds[--index] = pred;
671 stack->index = index;
672 return 0;
675 static struct filter_pred *
676 __pop_pred_stack(struct pred_stack *stack)
678 struct filter_pred *pred;
679 int index = stack->index;
681 pred = stack->preds[index++];
682 if (!pred)
683 return NULL;
685 stack->index = index;
686 return pred;
/*
 * Copy @src into slot @idx of the filter's predicate array, set its
 * callback, and (for AND/OR) pop its two children off @stack to wire
 * up the tree links.  The FILTER_PRED_FOLD bit is carried in ->index
 * while building and stripped when stored into left/right/parent.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src,
			   filter_pred_fn_t fn)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	if (src->field_name) {
		/* the slot owns its own copy of the field name */
		dest->field_name = kstrdup(src->field_name, GFP_KERNEL);
		if (!dest->field_name)
			return -ENOMEM;
	}
	dest->fn = fn;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
/* Free the predicate array and any field names the entries own. */
static void __free_preds(struct event_filter *filter)
{
	int i;

	if (filter->preds) {
		for (i = 0; i < filter->a_preds; i++)
			kfree(filter->preds[i].field_name);
		kfree(filter->preds);
		filter->preds = NULL;
	}
	filter->a_preds = 0;
	filter->n_preds = 0;
}

/* Stop the event's filter from being applied. */
static void filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}

/* Free a filter, its predicates, and its saved text; NULL is a no-op. */
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}
/*
 * Called when destroying the ftrace_event_call.
 * The call is being freed, so we do not need to worry about
 * the call being currently used. This is for module code removing
 * the tracepoints from within it.
 */
void destroy_preds(struct ftrace_event_call *call)
{
	__free_filter(call->filter);
	call->filter = NULL;
}

/* Allocate an empty, zeroed filter; NULL on allocation failure. */
static struct event_filter *__alloc_filter(void)
{
	struct event_filter *filter;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	return filter;
}
793 static int __alloc_preds(struct event_filter *filter, int n_preds)
795 struct filter_pred *pred;
796 int i;
798 if (filter->preds)
799 __free_preds(filter);
801 filter->preds =
802 kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
804 if (!filter->preds)
805 return -ENOMEM;
807 filter->a_preds = n_preds;
808 filter->n_preds = 0;
810 for (i = 0; i < n_preds; i++) {
811 pred = &filter->preds[i];
812 pred->fn = filter_pred_none;
815 return 0;
/* Disable filtering and drop the filter text for every event in @system. */
static void filter_free_subsystem_preds(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;

		filter_disable(call);
		remove_filter_string(call->filter);
	}
}

/* Free the filter objects themselves for every event in @system. */
static void filter_free_subsystem_filters(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;
		__free_filter(call->filter);
		call->filter = NULL;
	}
}
/*
 * Install @pred into the next free slot of @filter's predicate array
 * and wire it into the tree via filter_set_pred().
 */
static int filter_add_pred_fn(struct filter_parse_state *ps,
			      struct ftrace_event_call *call,
			      struct event_filter *filter,
			      struct filter_pred *pred,
			      struct pred_stack *stack,
			      filter_pred_fn_t fn)
{
	int idx, err;

	/* the array was pre-sized from count_preds(); overflow is a bug */
	if (WARN_ON(filter->n_preds == filter->a_preds)) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	idx = filter->n_preds;
	filter_clear_pred(&filter->preds[idx]);
	err = filter_set_pred(filter, idx, stack, pred, fn);
	if (err)
		return err;

	filter->n_preds++;

	return 0;
}
/* Map a field's C type string to the kind of filtering it supports. */
int filter_assign_type(const char *type)
{
	/* "__data_loc ... char ..." marks a dynamically located string */
	if (strstr(type, "__data_loc") && strstr(type, "char"))
		return FILTER_DYN_STRING;

	/* "char foo[N]" style fields are fixed-size strings */
	if (strchr(type, '[') && strstr(type, "char"))
		return FILTER_STATIC_STRING;

	return FILTER_OTHER;
}

static bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

/* String fields only take ==, != and ~; '~' is illegal on integers. */
static int is_legal_op(struct ftrace_event_field *field, int op)
{
	if (is_string_field(field) &&
	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
		return 0;
	if (!is_string_field(field) && op == OP_GLOB)
		return 0;

	return 1;
}
/*
 * Pick the integer-compare callback matching the operator, field size
 * and signedness.  ==/!= use the width-only equality predicates; the
 * relational ops need the signed/unsigned variants.  Returns NULL when
 * the field size is not 1/2/4/8 bytes.
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
/*
 * Resolve @pred against the event's fields, pick the match callback,
 * and (unless @dry_run) install it into @filter.  AND/OR predicates
 * skip field resolution entirely and go straight to installation.
 */
static int filter_add_pred(struct filter_parse_state *ps,
			   struct ftrace_event_call *call,
			   struct event_filter *filter,
			   struct filter_pred *pred,
			   struct pred_stack *stack,
			   bool dry_run)
{
	struct ftrace_event_field *field;
	filter_pred_fn_t fn;
	unsigned long long val;
	int ret;

	/* default to the never-matching callback until one is chosen */
	fn = pred->fn = filter_pred_none;

	if (pred->op == OP_AND)
		goto add_pred_fn;
	else if (pred->op == OP_OR)
		goto add_pred_fn;

	field = find_event_field(call, pred->field_name);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return -EINVAL;
	}

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else {
		/* integer field: the operand must parse as a number */
		if (field->is_signed)
			ret = strict_strtoll(pred->regex.pattern, 0, &val);
		else
			ret = strict_strtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	if (pred->op == OP_NE)
		pred->not = 1;

add_pred_fn:
	if (!dry_run)
		return filter_add_pred_fn(ps, call, filter, pred, stack, fn);
	return 0;
}
/* Reset parser state and remember the infix expression to convert. */
static void parse_init(struct filter_parse_state *ps,
		       struct filter_op *ops,
		       char *infix_string)
{
	memset(ps, '\0', sizeof(*ps));

	ps->infix.string = infix_string;
	ps->infix.cnt = strlen(infix_string);
	ps->ops = ops;

	INIT_LIST_HEAD(&ps->opstack);
	INIT_LIST_HEAD(&ps->postfix);
}

/* Consume and return the next character of the infix string. */
static char infix_next(struct filter_parse_state *ps)
{
	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}

/* Look at the next character without consuming it; 0 at end of input. */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}

/* Skip one character (used after a two-char operator was matched). */
static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}

/* True when operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}

/* True when @c can start one of the operator strings in the op table. */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
/*
 * Match the longest operator starting with @firstc, preferring a
 * two-character operator (e.g. "==") over a one-character one
 * (e.g. "=" would not match, "<" would).  Returns OP_NONE when the
 * character(s) do not form a known operator.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	/* try two-character operators first */
	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	/* then fall back to single-character operators */
	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}

/* Throw away the operand accumulated so far. */
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}

/* Append one character to the current operand; -EINVAL when full. */
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
/* Push @op onto the parser's operator stack. */
static int filter_opstack_push(struct filter_parse_state *ps, int op)
{
	struct opstack_op *opstack_op;

	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
	if (!opstack_op)
		return -ENOMEM;

	opstack_op->op = op;
	list_add(&opstack_op->list, &ps->opstack);

	return 0;
}

static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}

/* Peek at the top of the operator stack; OP_NONE when empty. */
static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}

/* Pop and return the top operator; OP_NONE when the stack is empty. */
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	int op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}

static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}

/* The operand currently being accumulated. */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
/* Append a copy of @operand to the postfix list as an OP_NONE element. */
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = OP_NONE;
	elt->operand = kstrdup(operand, GFP_KERNEL);
	if (!elt->operand) {
		kfree(elt);
		return -ENOMEM;
	}

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Append an operator element to the postfix list. */
static int postfix_append_op(struct filter_parse_state *ps, int op)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = op;
	elt->operand = NULL;

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Free the whole postfix list, operand strings included. */
static void postfix_clear(struct filter_parse_state *ps)
{
	struct postfix_elt *elt;

	while (!list_empty(&ps->postfix)) {
		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
		list_del(&elt->list);
		kfree(elt->operand);
		kfree(elt);
	}
}
/*
 * Shunting-yard conversion of the infix filter expression into the
 * postfix list in ps->postfix.  Double quotes toggle literal-string
 * mode so operator characters inside quoted operands are not parsed
 * as operators.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* an operator terminates the pending operand */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* flush stacked ops of equal/higher precedence */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* pop back to the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* flush the final operand and any remaining stacked operators */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1305 static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
1307 struct filter_pred *pred;
1309 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
1310 if (!pred)
1311 return NULL;
1313 pred->field_name = kstrdup(operand1, GFP_KERNEL);
1314 if (!pred->field_name) {
1315 kfree(pred);
1316 return NULL;
1319 strcpy(pred->regex.pattern, operand2);
1320 pred->regex.len = strlen(pred->regex.pattern);
1322 pred->op = op;
1324 return pred;
1327 static struct filter_pred *create_logical_pred(int op)
1329 struct filter_pred *pred;
1331 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
1332 if (!pred)
1333 return NULL;
1335 pred->op = op;
1337 return pred;
1340 static int check_preds(struct filter_parse_state *ps)
1342 int n_normal_preds = 0, n_logical_preds = 0;
1343 struct postfix_elt *elt;
1345 list_for_each_entry(elt, &ps->postfix, list) {
1346 if (elt->op == OP_NONE)
1347 continue;
1349 if (elt->op == OP_AND || elt->op == OP_OR) {
1350 n_logical_preds++;
1351 continue;
1353 n_normal_preds++;
1356 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1357 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1358 return -EINVAL;
1361 return 0;
1364 static int count_preds(struct filter_parse_state *ps)
1366 struct postfix_elt *elt;
1367 int n_preds = 0;
1369 list_for_each_entry(elt, &ps->postfix, list) {
1370 if (elt->op == OP_NONE)
1371 continue;
1372 n_preds++;
1375 return n_preds;
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct filter_pred *preds;
	struct filter_pred *pred;
	enum move_type move = MOVE_DOWN;
	int count = 0;
	int done = 0;
	int max;

	/*
	 * The max that we can hit a node is three times.
	 * Once going down, once coming up from left, and
	 * once coming up from right. This is more than enough
	 * since leafs are only hit a single time.
	 */
	max = 3 * filter->n_preds;

	preds = filter->preds;
	if (!preds)
		return -EINVAL;
	pred = root;

	do {
		/* more visits than a sane tree allows => cycle */
		if (WARN_ON(count++ > max))
			return -EINVAL;

		switch (move) {
		case MOVE_DOWN:
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			/* A leaf at the root is just a leaf in the tree */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		case MOVE_UP_FROM_LEFT:
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
/*
 * Count the leaf predicates of the (sub)tree rooted at @root.
 * A node whose ->left is FILTER_PRED_INVALID is a leaf; if @root
 * itself is a leaf the result is 1.  Uses the same parent-pointer
 * walk as check_pred_tree(), so no extra stack is needed.
 */
static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
{
	struct filter_pred *pred;
	enum move_type move = MOVE_DOWN;
	int count = 0;
	int done = 0;

	pred = root;

	do {
		switch (move) {
		case MOVE_DOWN:
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			/* A leaf at the root is just a leaf in the tree */
			if (pred == root)
				return 1;
			/* hit a leaf: count it and climb back up */
			count++;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		case MOVE_UP_FROM_LEFT:
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		}
		done = 1;
	} while (!done);

	return count;
}
/*
 * Flatten one foldable subtree: collect the indexes of every leaf
 * predicate below @root into a freshly allocated root->ops[] array
 * so the whole group can later be evaluated in one pass instead of
 * walking the subtree node by node.
 *
 * Returns 0 on success, -ENOMEM if the ops array cannot be
 * allocated, -EINVAL if the walk encounters more leafs than
 * count_leafs() reported (tree inconsistency).
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct filter_pred *pred;
	enum move_type move = MOVE_DOWN;
	int count = 0;		/* leafs recorded into root->ops[] so far */
	int children;
	int done = 0;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* val holds the number of entries in ops[] */
	root->val = children;

	pred = root;
	do {
		switch (move) {
		case MOVE_DOWN:
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			/* leaf: must not exceed the pre-counted total */
			if (WARN_ON(count == children))
				return -EINVAL;
			pred->index &= ~FILTER_PRED_FOLD;
			root->ops[count++] = pred->index;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		case MOVE_UP_FROM_LEFT:
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		}
		done = 1;
	} while (!done);

	return 0;
}
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 *
 * Walks the whole tree and calls fold_pred() on every node whose
 * index carries the FILTER_PRED_FOLD flag; folded nodes are then
 * treated as leafs (their subtree is not descended into).
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	struct filter_pred *preds;
	struct filter_pred *pred;
	enum move_type move = MOVE_DOWN;
	int done = 0;
	int err;

	preds = filter->preds;
	if (!preds)
		return -EINVAL;
	pred = root;

	do {
		switch (move) {
		case MOVE_DOWN:
			if (pred->index & FILTER_PRED_FOLD) {
				err = fold_pred(preds, pred);
				if (err)
					return err;
				/* Folded nodes are like leafs */
			} else if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}

			/* A leaf at the root is just a leaf in the tree */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		case MOVE_UP_FROM_LEFT:
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent, &move);
			continue;
		}
		done = 1;
	} while (!done);

	return 0;
}
1592 static int replace_preds(struct ftrace_event_call *call,
1593 struct event_filter *filter,
1594 struct filter_parse_state *ps,
1595 char *filter_string,
1596 bool dry_run)
1598 char *operand1 = NULL, *operand2 = NULL;
1599 struct filter_pred *pred;
1600 struct filter_pred *root;
1601 struct postfix_elt *elt;
1602 struct pred_stack stack = { }; /* init to NULL */
1603 int err;
1604 int n_preds = 0;
1606 n_preds = count_preds(ps);
1607 if (n_preds >= MAX_FILTER_PRED) {
1608 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1609 return -ENOSPC;
1612 err = check_preds(ps);
1613 if (err)
1614 return err;
1616 if (!dry_run) {
1617 err = __alloc_pred_stack(&stack, n_preds);
1618 if (err)
1619 return err;
1620 err = __alloc_preds(filter, n_preds);
1621 if (err)
1622 goto fail;
1625 n_preds = 0;
1626 list_for_each_entry(elt, &ps->postfix, list) {
1627 if (elt->op == OP_NONE) {
1628 if (!operand1)
1629 operand1 = elt->operand;
1630 else if (!operand2)
1631 operand2 = elt->operand;
1632 else {
1633 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1634 err = -EINVAL;
1635 goto fail;
1637 continue;
1640 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1641 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1642 err = -ENOSPC;
1643 goto fail;
1646 if (elt->op == OP_AND || elt->op == OP_OR) {
1647 pred = create_logical_pred(elt->op);
1648 goto add_pred;
1651 if (!operand1 || !operand2) {
1652 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1653 err = -EINVAL;
1654 goto fail;
1657 pred = create_pred(elt->op, operand1, operand2);
1658 add_pred:
1659 if (!pred) {
1660 err = -ENOMEM;
1661 goto fail;
1663 err = filter_add_pred(ps, call, filter, pred, &stack, dry_run);
1664 filter_free_pred(pred);
1665 if (err)
1666 goto fail;
1668 operand1 = operand2 = NULL;
1671 if (!dry_run) {
1672 /* We should have one item left on the stack */
1673 pred = __pop_pred_stack(&stack);
1674 if (!pred)
1675 return -EINVAL;
1676 /* This item is where we start from in matching */
1677 root = pred;
1678 /* Make sure the stack is empty */
1679 pred = __pop_pred_stack(&stack);
1680 if (WARN_ON(pred)) {
1681 err = -EINVAL;
1682 filter->root = NULL;
1683 goto fail;
1685 err = check_pred_tree(filter, root);
1686 if (err)
1687 goto fail;
1689 /* Optimize the tree */
1690 err = fold_pred_tree(filter, root);
1691 if (err)
1692 goto fail;
1694 /* We don't set root until we know it works */
1695 barrier();
1696 filter->root = root;
1699 err = 0;
1700 fail:
1701 __free_pred_stack(&stack);
1702 return err;
/*
 * List node used by replace_system_preds() to stage one new filter
 * per matching event before committing (or rolling back) them all.
 */
struct filter_list {
	struct list_head	list;
	struct event_filter	*filter;
};
/*
 * Apply @filter_string to every event belonging to @system.
 *
 * Phase 1 dry-runs the filter against each matching event so a
 * filter that cannot apply anywhere fails cleanly up front.
 * Phase 2 allocates a fresh filter per event, staged on a local
 * list, and swaps it with the event's current filter.  The old
 * filters are freed only after synchronize_sched() guarantees no
 * tracer is still reading them.
 *
 * Returns 0 if at least one event took the filter, -EINVAL if none
 * did, -ENOMEM on allocation failure.
 * NOTE(review): caller appears to hold event_mutex (all callers in
 * this file do) — confirm before reuse.
 */
static int replace_system_preds(struct event_subsystem *system,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;	/* stays true until one event succeeds */
	int err;

	list_for_each_entry(call, &ftrace_events, list) {

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 *  (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			goto fail;
	}

	list_for_each_entry(call, &ftrace_events, list) {
		struct event_filter *filter;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(call);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			call->flags |= TRACE_EVENT_FL_FILTERED;
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		/* after the swap, filter_item holds the OLD filter */
		filter = call->filter;
		call->filter = filter_item->filter;
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
/*
 * apply_event_filter - set or clear the filter of a single event
 * @call:          the event to filter
 * @filter_string: filter text; the literal "0" clears the filter
 *
 * Allocates and parses a new filter, then swaps it into call->filter
 * under event_mutex.  The old filter is freed only after
 * synchronize_sched() guarantees no tracer still reads it.  On a
 * parse error the event is disabled but the new filter (carrying the
 * error string) is still installed so the error can be read back.
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
	struct filter_parse_state *ps;
	struct event_filter *filter;
	struct event_filter *tmp;
	int err = 0;

	mutex_lock(&event_mutex);

	/* "0" means: remove any existing filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(call);
		filter = call->filter;
		if (!filter)
			goto out_unlock;
		call->filter = NULL;
		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);
		goto out_unlock;
	}

	err = -ENOMEM;
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out_unlock;

	filter = __alloc_filter();
	if (!filter) {
		kfree(ps);
		goto out_unlock;
	}

	/* record the string so it can be read back via the filter file */
	replace_filter_string(filter, filter_string);

	parse_init(ps, filter_ops, filter_string);
	err = filter_parse(ps);
	if (err) {
		append_filter_err(ps, filter);
		goto out;
	}

	err = replace_preds(call, filter, ps, filter_string, false);
	if (err) {
		filter_disable(call);
		append_filter_err(ps, filter);
	} else
		call->flags |= TRACE_EVENT_FL_FILTERED;
out:
	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	tmp = call->filter;
	call->filter = filter;
	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		__free_filter(tmp);
	}
	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
/*
 * apply_subsystem_event_filter - set or clear a subsystem-wide filter
 * @system:        subsystem whose events get the filter
 * @filter_string: filter text; the literal "0" clears all filters
 *
 * Holds event_mutex.  The string is parsed once, then applied to
 * every event in the subsystem via replace_system_preds().  The
 * system->filter copy only stores the string (and any error) for
 * readback; no event evaluates it directly.
 */
int apply_subsystem_event_filter(struct event_subsystem *system,
				 char *filter_string)
{
	struct filter_parse_state *ps;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!system->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	/* "0" means: drop the filters of every event in the subsystem */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(system);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system);
		__free_filter(filter);
		goto out_unlock;
	}

	err = -ENOMEM;
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out_unlock;

	filter = __alloc_filter();
	if (!filter)
		goto out;

	replace_filter_string(filter, filter_string);
	/*
	 * No event actually uses the system filter
	 * we can free it without synchronize_sched().
	 */
	__free_filter(system->filter);
	system->filter = filter;

	parse_init(ps, filter_ops, filter_string);
	err = filter_parse(ps);
	if (err) {
		append_filter_err(ps, system->filter);
		goto out;
	}

	err = replace_system_preds(system, ps, filter_string);
	if (err)
		append_filter_err(ps, system->filter);

out:
	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1945 #ifdef CONFIG_PERF_EVENTS
1947 void ftrace_profile_free_filter(struct perf_event *event)
1949 struct event_filter *filter = event->filter;
1951 event->filter = NULL;
1952 __free_filter(filter);
1955 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1956 char *filter_str)
1958 int err;
1959 struct event_filter *filter;
1960 struct filter_parse_state *ps;
1961 struct ftrace_event_call *call = NULL;
1963 mutex_lock(&event_mutex);
1965 list_for_each_entry(call, &ftrace_events, list) {
1966 if (call->event.type == event_id)
1967 break;
1970 err = -EINVAL;
1971 if (&call->list == &ftrace_events)
1972 goto out_unlock;
1974 err = -EEXIST;
1975 if (event->filter)
1976 goto out_unlock;
1978 filter = __alloc_filter();
1979 if (!filter) {
1980 err = PTR_ERR(filter);
1981 goto out_unlock;
1984 err = -ENOMEM;
1985 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1986 if (!ps)
1987 goto free_filter;
1989 parse_init(ps, filter_ops, filter_str);
1990 err = filter_parse(ps);
1991 if (err)
1992 goto free_ps;
1994 err = replace_preds(call, filter, ps, filter_str, false);
1995 if (!err)
1996 event->filter = filter;
1998 free_ps:
1999 filter_opstack_clear(ps);
2000 postfix_clear(ps);
2001 kfree(ps);
2003 free_filter:
2004 if (err)
2005 __free_filter(filter);
2007 out_unlock:
2008 mutex_unlock(&event_mutex);
2010 return err;
2013 #endif /* CONFIG_PERF_EVENTS */