regulator: arizona-ldo1: Remove redundant error message
[linux-2.6/btrfs-unstable.git] / kernel / trace / trace_events_filter.c
blob8a8631926a07043d788ce7beb5c1fa7663a052ac
1 /*
2 * trace_events_filter - generic event filtering
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
27 #include "trace.h"
28 #include "trace_output.h"
30 #define DEFAULT_SYS_FILTER_MESSAGE \
31 "### global filter ###\n" \
32 "# Use this to set filters for multiple events.\n" \
33 "# Only events with the given fields will be affected.\n" \
34 "# If no events are modified, an error message will be displayed here"
36 enum filter_op_ids
38 OP_OR,
39 OP_AND,
40 OP_GLOB,
41 OP_NE,
42 OP_EQ,
43 OP_LT,
44 OP_LE,
45 OP_GT,
46 OP_GE,
47 OP_BAND,
48 OP_NONE,
49 OP_OPEN_PAREN,
52 struct filter_op {
53 int id;
54 char *string;
55 int precedence;
58 /* Order must be the same as enum filter_op_ids above */
59 static struct filter_op filter_ops[] = {
60 { OP_OR, "||", 1 },
61 { OP_AND, "&&", 2 },
62 { OP_GLOB, "~", 4 },
63 { OP_NE, "!=", 4 },
64 { OP_EQ, "==", 4 },
65 { OP_LT, "<", 5 },
66 { OP_LE, "<=", 5 },
67 { OP_GT, ">", 5 },
68 { OP_GE, ">=", 5 },
69 { OP_BAND, "&", 6 },
70 { OP_NONE, "OP_NONE", 0 },
71 { OP_OPEN_PAREN, "(", 0 },
74 enum {
75 FILT_ERR_NONE,
76 FILT_ERR_INVALID_OP,
77 FILT_ERR_UNBALANCED_PAREN,
78 FILT_ERR_TOO_MANY_OPERANDS,
79 FILT_ERR_OPERAND_TOO_LONG,
80 FILT_ERR_FIELD_NOT_FOUND,
81 FILT_ERR_ILLEGAL_FIELD_OP,
82 FILT_ERR_ILLEGAL_INTVAL,
83 FILT_ERR_BAD_SUBSYS_FILTER,
84 FILT_ERR_TOO_MANY_PREDS,
85 FILT_ERR_MISSING_FIELD,
86 FILT_ERR_INVALID_FILTER,
87 FILT_ERR_IP_FIELD_ONLY,
90 static char *err_text[] = {
91 "No error",
92 "Invalid operator",
93 "Unbalanced parens",
94 "Too many operands",
95 "Operand too long",
96 "Field not found",
97 "Illegal operation for field type",
98 "Illegal integer value",
99 "Couldn't find or set field in one of a subsystem's events",
100 "Too many terms in predicate expression",
101 "Missing field name and/or value",
102 "Meaningless filter expression",
103 "Only 'ip' field is supported for function trace",
106 struct opstack_op {
107 int op;
108 struct list_head list;
111 struct postfix_elt {
112 int op;
113 char *operand;
114 struct list_head list;
117 struct filter_parse_state {
118 struct filter_op *ops;
119 struct list_head opstack;
120 struct list_head postfix;
121 int lasterr;
122 int lasterr_pos;
124 struct {
125 char *string;
126 unsigned int cnt;
127 unsigned int tail;
128 } infix;
130 struct {
131 char string[MAX_FILTER_STR_VAL];
132 int pos;
133 unsigned int tail;
134 } operand;
137 struct pred_stack {
138 struct filter_pred **preds;
139 int index;
142 #define DEFINE_COMPARISON_PRED(type) \
143 static int filter_pred_##type(struct filter_pred *pred, void *event) \
145 type *addr = (type *)(event + pred->offset); \
146 type val = (type)pred->val; \
147 int match = 0; \
149 switch (pred->op) { \
150 case OP_LT: \
151 match = (*addr < val); \
152 break; \
153 case OP_LE: \
154 match = (*addr <= val); \
155 break; \
156 case OP_GT: \
157 match = (*addr > val); \
158 break; \
159 case OP_GE: \
160 match = (*addr >= val); \
161 break; \
162 case OP_BAND: \
163 match = (*addr & val); \
164 break; \
165 default: \
166 break; \
169 return match; \
172 #define DEFINE_EQUALITY_PRED(size) \
173 static int filter_pred_##size(struct filter_pred *pred, void *event) \
175 u##size *addr = (u##size *)(event + pred->offset); \
176 u##size val = (u##size)pred->val; \
177 int match; \
179 match = (val == *addr) ^ pred->not; \
181 return match; \
184 DEFINE_COMPARISON_PRED(s64);
185 DEFINE_COMPARISON_PRED(u64);
186 DEFINE_COMPARISON_PRED(s32);
187 DEFINE_COMPARISON_PRED(u32);
188 DEFINE_COMPARISON_PRED(s16);
189 DEFINE_COMPARISON_PRED(u16);
190 DEFINE_COMPARISON_PRED(s8);
191 DEFINE_COMPARISON_PRED(u8);
193 DEFINE_EQUALITY_PRED(64);
194 DEFINE_EQUALITY_PRED(32);
195 DEFINE_EQUALITY_PRED(16);
196 DEFINE_EQUALITY_PRED(8);
198 /* Filter predicate for fixed sized arrays of characters */
199 static int filter_pred_string(struct filter_pred *pred, void *event)
201 char *addr = (char *)(event + pred->offset);
202 int cmp, match;
204 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
206 match = cmp ^ pred->not;
208 return match;
211 /* Filter predicate for char * pointers */
212 static int filter_pred_pchar(struct filter_pred *pred, void *event)
214 char **addr = (char **)(event + pred->offset);
215 int cmp, match;
216 int len = strlen(*addr) + 1; /* including tailing '\0' */
218 cmp = pred->regex.match(*addr, &pred->regex, len);
220 match = cmp ^ pred->not;
222 return match;
226 * Filter predicate for dynamic sized arrays of characters.
227 * These are implemented through a list of strings at the end
228 * of the entry.
229 * Also each of these strings have a field in the entry which
230 * contains its offset from the beginning of the entry.
231 * We have then first to get this field, dereference it
232 * and add it to the address of the entry, and at last we have
233 * the address of the string.
235 static int filter_pred_strloc(struct filter_pred *pred, void *event)
237 u32 str_item = *(u32 *)(event + pred->offset);
238 int str_loc = str_item & 0xffff;
239 int str_len = str_item >> 16;
240 char *addr = (char *)(event + str_loc);
241 int cmp, match;
243 cmp = pred->regex.match(addr, &pred->regex, str_len);
245 match = cmp ^ pred->not;
247 return match;
250 static int filter_pred_none(struct filter_pred *pred, void *event)
252 return 0;
256 * regex_match_foo - Basic regex callbacks
258 * @str: the string to be searched
259 * @r: the regex structure containing the pattern string
260 * @len: the length of the string to be searched (including '\0')
262 * Note:
263 * - @str might not be NULL-terminated if it's of type DYN_STRING
264 * or STATIC_STRING
267 static int regex_match_full(char *str, struct regex *r, int len)
269 if (strncmp(str, r->pattern, len) == 0)
270 return 1;
271 return 0;
274 static int regex_match_front(char *str, struct regex *r, int len)
276 if (strncmp(str, r->pattern, r->len) == 0)
277 return 1;
278 return 0;
281 static int regex_match_middle(char *str, struct regex *r, int len)
283 if (strnstr(str, r->pattern, len))
284 return 1;
285 return 0;
288 static int regex_match_end(char *str, struct regex *r, int len)
290 int strlen = len - 1;
292 if (strlen >= r->len &&
293 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
294 return 1;
295 return 0;
299 * filter_parse_regex - parse a basic regex
300 * @buff: the raw regex
301 * @len: length of the regex
302 * @search: will point to the beginning of the string to compare
303 * @not: tell whether the match will have to be inverted
305 * This passes in a buffer containing a regex and this function will
306 * set search to point to the search part of the buffer and
307 * return the type of search it is (see enum above).
308 * This does modify buff.
310 * Returns enum type.
311 * search returns the pointer to use for comparison.
312 * not returns 1 if buff started with a '!'
313 * 0 otherwise.
315 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
317 int type = MATCH_FULL;
318 int i;
320 if (buff[0] == '!') {
321 *not = 1;
322 buff++;
323 len--;
324 } else
325 *not = 0;
327 *search = buff;
329 for (i = 0; i < len; i++) {
330 if (buff[i] == '*') {
331 if (!i) {
332 *search = buff + 1;
333 type = MATCH_END_ONLY;
334 } else {
335 if (type == MATCH_END_ONLY)
336 type = MATCH_MIDDLE_ONLY;
337 else
338 type = MATCH_FRONT_ONLY;
339 buff[i] = 0;
340 break;
345 return type;
348 static void filter_build_regex(struct filter_pred *pred)
350 struct regex *r = &pred->regex;
351 char *search;
352 enum regex_type type = MATCH_FULL;
353 int not = 0;
355 if (pred->op == OP_GLOB) {
356 type = filter_parse_regex(r->pattern, r->len, &search, &not);
357 r->len = strlen(search);
358 memmove(r->pattern, search, r->len+1);
361 switch (type) {
362 case MATCH_FULL:
363 r->match = regex_match_full;
364 break;
365 case MATCH_FRONT_ONLY:
366 r->match = regex_match_front;
367 break;
368 case MATCH_MIDDLE_ONLY:
369 r->match = regex_match_middle;
370 break;
371 case MATCH_END_ONLY:
372 r->match = regex_match_end;
373 break;
376 pred->not ^= not;
379 enum move_type {
380 MOVE_DOWN,
381 MOVE_UP_FROM_LEFT,
382 MOVE_UP_FROM_RIGHT
385 static struct filter_pred *
386 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
387 int index, enum move_type *move)
389 if (pred->parent & FILTER_PRED_IS_RIGHT)
390 *move = MOVE_UP_FROM_RIGHT;
391 else
392 *move = MOVE_UP_FROM_LEFT;
393 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
395 return pred;
398 enum walk_return {
399 WALK_PRED_ABORT,
400 WALK_PRED_PARENT,
401 WALK_PRED_DEFAULT,
404 typedef int (*filter_pred_walkcb_t) (enum move_type move,
405 struct filter_pred *pred,
406 int *err, void *data);
408 static int walk_pred_tree(struct filter_pred *preds,
409 struct filter_pred *root,
410 filter_pred_walkcb_t cb, void *data)
412 struct filter_pred *pred = root;
413 enum move_type move = MOVE_DOWN;
414 int done = 0;
416 if (!preds)
417 return -EINVAL;
419 do {
420 int err = 0, ret;
422 ret = cb(move, pred, &err, data);
423 if (ret == WALK_PRED_ABORT)
424 return err;
425 if (ret == WALK_PRED_PARENT)
426 goto get_parent;
428 switch (move) {
429 case MOVE_DOWN:
430 if (pred->left != FILTER_PRED_INVALID) {
431 pred = &preds[pred->left];
432 continue;
434 goto get_parent;
435 case MOVE_UP_FROM_LEFT:
436 pred = &preds[pred->right];
437 move = MOVE_DOWN;
438 continue;
439 case MOVE_UP_FROM_RIGHT:
440 get_parent:
441 if (pred == root)
442 break;
443 pred = get_pred_parent(pred, preds,
444 pred->parent,
445 &move);
446 continue;
448 done = 1;
449 } while (!done);
451 /* We are fine. */
452 return 0;
456 * A series of AND or ORs where found together. Instead of
457 * climbing up and down the tree branches, an array of the
458 * ops were made in order of checks. We can just move across
459 * the array and short circuit if needed.
461 static int process_ops(struct filter_pred *preds,
462 struct filter_pred *op, void *rec)
464 struct filter_pred *pred;
465 int match = 0;
466 int type;
467 int i;
470 * Micro-optimization: We set type to true if op
471 * is an OR and false otherwise (AND). Then we
472 * just need to test if the match is equal to
473 * the type, and if it is, we can short circuit the
474 * rest of the checks:
476 * if ((match && op->op == OP_OR) ||
477 * (!match && op->op == OP_AND))
478 * return match;
480 type = op->op == OP_OR;
482 for (i = 0; i < op->val; i++) {
483 pred = &preds[op->ops[i]];
484 if (!WARN_ON_ONCE(!pred->fn))
485 match = pred->fn(pred, rec);
486 if (!!match == type)
487 return match;
489 return match;
492 struct filter_match_preds_data {
493 struct filter_pred *preds;
494 int match;
495 void *rec;
498 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
499 int *err, void *data)
501 struct filter_match_preds_data *d = data;
503 *err = 0;
504 switch (move) {
505 case MOVE_DOWN:
506 /* only AND and OR have children */
507 if (pred->left != FILTER_PRED_INVALID) {
508 /* If ops is set, then it was folded. */
509 if (!pred->ops)
510 return WALK_PRED_DEFAULT;
511 /* We can treat folded ops as a leaf node */
512 d->match = process_ops(d->preds, pred, d->rec);
513 } else {
514 if (!WARN_ON_ONCE(!pred->fn))
515 d->match = pred->fn(pred, d->rec);
518 return WALK_PRED_PARENT;
519 case MOVE_UP_FROM_LEFT:
521 * Check for short circuits.
523 * Optimization: !!match == (pred->op == OP_OR)
524 * is the same as:
525 * if ((match && pred->op == OP_OR) ||
526 * (!match && pred->op == OP_AND))
528 if (!!d->match == (pred->op == OP_OR))
529 return WALK_PRED_PARENT;
530 break;
531 case MOVE_UP_FROM_RIGHT:
532 break;
535 return WALK_PRED_DEFAULT;
538 /* return 1 if event matches, 0 otherwise (discard) */
539 int filter_match_preds(struct event_filter *filter, void *rec)
541 struct filter_pred *preds;
542 struct filter_pred *root;
543 struct filter_match_preds_data data = {
544 /* match is currently meaningless */
545 .match = -1,
546 .rec = rec,
548 int n_preds, ret;
550 /* no filter is considered a match */
551 if (!filter)
552 return 1;
554 n_preds = filter->n_preds;
555 if (!n_preds)
556 return 1;
559 * n_preds, root and filter->preds are protect with preemption disabled.
561 root = rcu_dereference_sched(filter->root);
562 if (!root)
563 return 1;
565 data.preds = preds = rcu_dereference_sched(filter->preds);
566 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
567 WARN_ON(ret);
568 return data.match;
570 EXPORT_SYMBOL_GPL(filter_match_preds);
572 static void parse_error(struct filter_parse_state *ps, int err, int pos)
574 ps->lasterr = err;
575 ps->lasterr_pos = pos;
578 static void remove_filter_string(struct event_filter *filter)
580 if (!filter)
581 return;
583 kfree(filter->filter_string);
584 filter->filter_string = NULL;
587 static int replace_filter_string(struct event_filter *filter,
588 char *filter_string)
590 kfree(filter->filter_string);
591 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
592 if (!filter->filter_string)
593 return -ENOMEM;
595 return 0;
598 static int append_filter_string(struct event_filter *filter,
599 char *string)
601 int newlen;
602 char *new_filter_string;
604 BUG_ON(!filter->filter_string);
605 newlen = strlen(filter->filter_string) + strlen(string) + 1;
606 new_filter_string = kmalloc(newlen, GFP_KERNEL);
607 if (!new_filter_string)
608 return -ENOMEM;
610 strcpy(new_filter_string, filter->filter_string);
611 strcat(new_filter_string, string);
612 kfree(filter->filter_string);
613 filter->filter_string = new_filter_string;
615 return 0;
618 static void append_filter_err(struct filter_parse_state *ps,
619 struct event_filter *filter)
621 int pos = ps->lasterr_pos;
622 char *buf, *pbuf;
624 buf = (char *)__get_free_page(GFP_TEMPORARY);
625 if (!buf)
626 return;
628 append_filter_string(filter, "\n");
629 memset(buf, ' ', PAGE_SIZE);
630 if (pos > PAGE_SIZE - 128)
631 pos = 0;
632 buf[pos] = '^';
633 pbuf = &buf[pos] + 1;
635 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
636 append_filter_string(filter, buf);
637 free_page((unsigned long) buf);
640 static inline struct event_filter *event_filter(struct ftrace_event_file *file)
642 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
643 return file->event_call->filter;
644 else
645 return file->filter;
648 /* caller must hold event_mutex */
649 void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
651 struct event_filter *filter = event_filter(file);
653 if (filter && filter->filter_string)
654 trace_seq_printf(s, "%s\n", filter->filter_string);
655 else
656 trace_seq_puts(s, "none\n");
659 void print_subsystem_event_filter(struct event_subsystem *system,
660 struct trace_seq *s)
662 struct event_filter *filter;
664 mutex_lock(&event_mutex);
665 filter = system->filter;
666 if (filter && filter->filter_string)
667 trace_seq_printf(s, "%s\n", filter->filter_string);
668 else
669 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
670 mutex_unlock(&event_mutex);
673 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
675 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
676 if (!stack->preds)
677 return -ENOMEM;
678 stack->index = n_preds;
679 return 0;
682 static void __free_pred_stack(struct pred_stack *stack)
684 kfree(stack->preds);
685 stack->index = 0;
688 static int __push_pred_stack(struct pred_stack *stack,
689 struct filter_pred *pred)
691 int index = stack->index;
693 if (WARN_ON(index == 0))
694 return -ENOSPC;
696 stack->preds[--index] = pred;
697 stack->index = index;
698 return 0;
701 static struct filter_pred *
702 __pop_pred_stack(struct pred_stack *stack)
704 struct filter_pred *pred;
705 int index = stack->index;
707 pred = stack->preds[index++];
708 if (!pred)
709 return NULL;
711 stack->index = index;
712 return pred;
715 static int filter_set_pred(struct event_filter *filter,
716 int idx,
717 struct pred_stack *stack,
718 struct filter_pred *src)
720 struct filter_pred *dest = &filter->preds[idx];
721 struct filter_pred *left;
722 struct filter_pred *right;
724 *dest = *src;
725 dest->index = idx;
727 if (dest->op == OP_OR || dest->op == OP_AND) {
728 right = __pop_pred_stack(stack);
729 left = __pop_pred_stack(stack);
730 if (!left || !right)
731 return -EINVAL;
733 * If both children can be folded
734 * and they are the same op as this op or a leaf,
735 * then this op can be folded.
737 if (left->index & FILTER_PRED_FOLD &&
738 (left->op == dest->op ||
739 left->left == FILTER_PRED_INVALID) &&
740 right->index & FILTER_PRED_FOLD &&
741 (right->op == dest->op ||
742 right->left == FILTER_PRED_INVALID))
743 dest->index |= FILTER_PRED_FOLD;
745 dest->left = left->index & ~FILTER_PRED_FOLD;
746 dest->right = right->index & ~FILTER_PRED_FOLD;
747 left->parent = dest->index & ~FILTER_PRED_FOLD;
748 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
749 } else {
751 * Make dest->left invalid to be used as a quick
752 * way to know this is a leaf node.
754 dest->left = FILTER_PRED_INVALID;
756 /* All leafs allow folding the parent ops. */
757 dest->index |= FILTER_PRED_FOLD;
760 return __push_pred_stack(stack, dest);
763 static void __free_preds(struct event_filter *filter)
765 int i;
767 if (filter->preds) {
768 for (i = 0; i < filter->n_preds; i++)
769 kfree(filter->preds[i].ops);
770 kfree(filter->preds);
771 filter->preds = NULL;
773 filter->a_preds = 0;
774 filter->n_preds = 0;
777 static void call_filter_disable(struct ftrace_event_call *call)
779 call->flags &= ~TRACE_EVENT_FL_FILTERED;
782 static void filter_disable(struct ftrace_event_file *file)
784 struct ftrace_event_call *call = file->event_call;
786 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
787 call_filter_disable(call);
788 else
789 file->flags &= ~FTRACE_EVENT_FL_FILTERED;
792 static void __free_filter(struct event_filter *filter)
794 if (!filter)
795 return;
797 __free_preds(filter);
798 kfree(filter->filter_string);
799 kfree(filter);
802 void free_event_filter(struct event_filter *filter)
804 __free_filter(filter);
807 void destroy_call_preds(struct ftrace_event_call *call)
809 __free_filter(call->filter);
810 call->filter = NULL;
813 static void destroy_file_preds(struct ftrace_event_file *file)
815 __free_filter(file->filter);
816 file->filter = NULL;
820 * Called when destroying the ftrace_event_file.
821 * The file is being freed, so we do not need to worry about
822 * the file being currently used. This is for module code removing
823 * the tracepoints from within it.
825 void destroy_preds(struct ftrace_event_file *file)
827 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
828 destroy_call_preds(file->event_call);
829 else
830 destroy_file_preds(file);
833 static struct event_filter *__alloc_filter(void)
835 struct event_filter *filter;
837 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
838 return filter;
841 static int __alloc_preds(struct event_filter *filter, int n_preds)
843 struct filter_pred *pred;
844 int i;
846 if (filter->preds)
847 __free_preds(filter);
849 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
851 if (!filter->preds)
852 return -ENOMEM;
854 filter->a_preds = n_preds;
855 filter->n_preds = 0;
857 for (i = 0; i < n_preds; i++) {
858 pred = &filter->preds[i];
859 pred->fn = filter_pred_none;
862 return 0;
865 static inline void __remove_filter(struct ftrace_event_file *file)
867 struct ftrace_event_call *call = file->event_call;
869 filter_disable(file);
870 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
871 remove_filter_string(call->filter);
872 else
873 remove_filter_string(file->filter);
876 static void filter_free_subsystem_preds(struct event_subsystem *system,
877 struct trace_array *tr)
879 struct ftrace_event_file *file;
880 struct ftrace_event_call *call;
882 list_for_each_entry(file, &tr->events, list) {
883 call = file->event_call;
884 if (strcmp(call->class->system, system->name) != 0)
885 continue;
887 __remove_filter(file);
891 static inline void __free_subsystem_filter(struct ftrace_event_file *file)
893 struct ftrace_event_call *call = file->event_call;
895 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
896 __free_filter(call->filter);
897 call->filter = NULL;
898 } else {
899 __free_filter(file->filter);
900 file->filter = NULL;
904 static void filter_free_subsystem_filters(struct event_subsystem *system,
905 struct trace_array *tr)
907 struct ftrace_event_file *file;
908 struct ftrace_event_call *call;
910 list_for_each_entry(file, &tr->events, list) {
911 call = file->event_call;
912 if (strcmp(call->class->system, system->name) != 0)
913 continue;
914 __free_subsystem_filter(file);
918 static int filter_add_pred(struct filter_parse_state *ps,
919 struct event_filter *filter,
920 struct filter_pred *pred,
921 struct pred_stack *stack)
923 int err;
925 if (WARN_ON(filter->n_preds == filter->a_preds)) {
926 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
927 return -ENOSPC;
930 err = filter_set_pred(filter, filter->n_preds, stack, pred);
931 if (err)
932 return err;
934 filter->n_preds++;
936 return 0;
939 int filter_assign_type(const char *type)
941 if (strstr(type, "__data_loc") && strstr(type, "char"))
942 return FILTER_DYN_STRING;
944 if (strchr(type, '[') && strstr(type, "char"))
945 return FILTER_STATIC_STRING;
947 return FILTER_OTHER;
950 static bool is_function_field(struct ftrace_event_field *field)
952 return field->filter_type == FILTER_TRACE_FN;
955 static bool is_string_field(struct ftrace_event_field *field)
957 return field->filter_type == FILTER_DYN_STRING ||
958 field->filter_type == FILTER_STATIC_STRING ||
959 field->filter_type == FILTER_PTR_STRING;
962 static int is_legal_op(struct ftrace_event_field *field, int op)
964 if (is_string_field(field) &&
965 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
966 return 0;
967 if (!is_string_field(field) && op == OP_GLOB)
968 return 0;
970 return 1;
973 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
974 int field_is_signed)
976 filter_pred_fn_t fn = NULL;
978 switch (field_size) {
979 case 8:
980 if (op == OP_EQ || op == OP_NE)
981 fn = filter_pred_64;
982 else if (field_is_signed)
983 fn = filter_pred_s64;
984 else
985 fn = filter_pred_u64;
986 break;
987 case 4:
988 if (op == OP_EQ || op == OP_NE)
989 fn = filter_pred_32;
990 else if (field_is_signed)
991 fn = filter_pred_s32;
992 else
993 fn = filter_pred_u32;
994 break;
995 case 2:
996 if (op == OP_EQ || op == OP_NE)
997 fn = filter_pred_16;
998 else if (field_is_signed)
999 fn = filter_pred_s16;
1000 else
1001 fn = filter_pred_u16;
1002 break;
1003 case 1:
1004 if (op == OP_EQ || op == OP_NE)
1005 fn = filter_pred_8;
1006 else if (field_is_signed)
1007 fn = filter_pred_s8;
1008 else
1009 fn = filter_pred_u8;
1010 break;
1013 return fn;
1016 static int init_pred(struct filter_parse_state *ps,
1017 struct ftrace_event_field *field,
1018 struct filter_pred *pred)
1021 filter_pred_fn_t fn = filter_pred_none;
1022 unsigned long long val;
1023 int ret;
1025 pred->offset = field->offset;
1027 if (!is_legal_op(field, pred->op)) {
1028 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
1029 return -EINVAL;
1032 if (is_string_field(field)) {
1033 filter_build_regex(pred);
1035 if (field->filter_type == FILTER_STATIC_STRING) {
1036 fn = filter_pred_string;
1037 pred->regex.field_len = field->size;
1038 } else if (field->filter_type == FILTER_DYN_STRING)
1039 fn = filter_pred_strloc;
1040 else
1041 fn = filter_pred_pchar;
1042 } else if (is_function_field(field)) {
1043 if (strcmp(field->name, "ip")) {
1044 parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
1045 return -EINVAL;
1047 } else {
1048 if (field->is_signed)
1049 ret = kstrtoll(pred->regex.pattern, 0, &val);
1050 else
1051 ret = kstrtoull(pred->regex.pattern, 0, &val);
1052 if (ret) {
1053 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
1054 return -EINVAL;
1056 pred->val = val;
1058 fn = select_comparison_fn(pred->op, field->size,
1059 field->is_signed);
1060 if (!fn) {
1061 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1062 return -EINVAL;
1066 if (pred->op == OP_NE)
1067 pred->not = 1;
1069 pred->fn = fn;
1070 return 0;
1073 static void parse_init(struct filter_parse_state *ps,
1074 struct filter_op *ops,
1075 char *infix_string)
1077 memset(ps, '\0', sizeof(*ps));
1079 ps->infix.string = infix_string;
1080 ps->infix.cnt = strlen(infix_string);
1081 ps->ops = ops;
1083 INIT_LIST_HEAD(&ps->opstack);
1084 INIT_LIST_HEAD(&ps->postfix);
1087 static char infix_next(struct filter_parse_state *ps)
1089 ps->infix.cnt--;
1091 return ps->infix.string[ps->infix.tail++];
1094 static char infix_peek(struct filter_parse_state *ps)
1096 if (ps->infix.tail == strlen(ps->infix.string))
1097 return 0;
1099 return ps->infix.string[ps->infix.tail];
1102 static void infix_advance(struct filter_parse_state *ps)
1104 ps->infix.cnt--;
1105 ps->infix.tail++;
1108 static inline int is_precedence_lower(struct filter_parse_state *ps,
1109 int a, int b)
1111 return ps->ops[a].precedence < ps->ops[b].precedence;
1114 static inline int is_op_char(struct filter_parse_state *ps, char c)
1116 int i;
1118 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1119 if (ps->ops[i].string[0] == c)
1120 return 1;
1123 return 0;
1126 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1128 char nextc = infix_peek(ps);
1129 char opstr[3];
1130 int i;
1132 opstr[0] = firstc;
1133 opstr[1] = nextc;
1134 opstr[2] = '\0';
1136 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1137 if (!strcmp(opstr, ps->ops[i].string)) {
1138 infix_advance(ps);
1139 return ps->ops[i].id;
1143 opstr[1] = '\0';
1145 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1146 if (!strcmp(opstr, ps->ops[i].string))
1147 return ps->ops[i].id;
1150 return OP_NONE;
1153 static inline void clear_operand_string(struct filter_parse_state *ps)
1155 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1156 ps->operand.tail = 0;
1159 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1161 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1162 return -EINVAL;
1164 ps->operand.string[ps->operand.tail++] = c;
1166 return 0;
1169 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1171 struct opstack_op *opstack_op;
1173 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1174 if (!opstack_op)
1175 return -ENOMEM;
1177 opstack_op->op = op;
1178 list_add(&opstack_op->list, &ps->opstack);
1180 return 0;
1183 static int filter_opstack_empty(struct filter_parse_state *ps)
1185 return list_empty(&ps->opstack);
1188 static int filter_opstack_top(struct filter_parse_state *ps)
1190 struct opstack_op *opstack_op;
1192 if (filter_opstack_empty(ps))
1193 return OP_NONE;
1195 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1197 return opstack_op->op;
1200 static int filter_opstack_pop(struct filter_parse_state *ps)
1202 struct opstack_op *opstack_op;
1203 int op;
1205 if (filter_opstack_empty(ps))
1206 return OP_NONE;
1208 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1209 op = opstack_op->op;
1210 list_del(&opstack_op->list);
1212 kfree(opstack_op);
1214 return op;
1217 static void filter_opstack_clear(struct filter_parse_state *ps)
1219 while (!filter_opstack_empty(ps))
1220 filter_opstack_pop(ps);
1223 static char *curr_operand(struct filter_parse_state *ps)
1225 return ps->operand.string;
1228 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1230 struct postfix_elt *elt;
1232 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1233 if (!elt)
1234 return -ENOMEM;
1236 elt->op = OP_NONE;
1237 elt->operand = kstrdup(operand, GFP_KERNEL);
1238 if (!elt->operand) {
1239 kfree(elt);
1240 return -ENOMEM;
1243 list_add_tail(&elt->list, &ps->postfix);
1245 return 0;
1248 static int postfix_append_op(struct filter_parse_state *ps, int op)
1250 struct postfix_elt *elt;
1252 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1253 if (!elt)
1254 return -ENOMEM;
1256 elt->op = op;
1257 elt->operand = NULL;
1259 list_add_tail(&elt->list, &ps->postfix);
1261 return 0;
1264 static void postfix_clear(struct filter_parse_state *ps)
1266 struct postfix_elt *elt;
1268 while (!list_empty(&ps->postfix)) {
1269 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1270 list_del(&elt->list);
1271 kfree(elt->operand);
1272 kfree(elt);
1276 static int filter_parse(struct filter_parse_state *ps)
1278 int in_string = 0;
1279 int op, top_op;
1280 char ch;
1282 while ((ch = infix_next(ps))) {
1283 if (ch == '"') {
1284 in_string ^= 1;
1285 continue;
1288 if (in_string)
1289 goto parse_operand;
1291 if (isspace(ch))
1292 continue;
1294 if (is_op_char(ps, ch)) {
1295 op = infix_get_op(ps, ch);
1296 if (op == OP_NONE) {
1297 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1298 return -EINVAL;
1301 if (strlen(curr_operand(ps))) {
1302 postfix_append_operand(ps, curr_operand(ps));
1303 clear_operand_string(ps);
1306 while (!filter_opstack_empty(ps)) {
1307 top_op = filter_opstack_top(ps);
1308 if (!is_precedence_lower(ps, top_op, op)) {
1309 top_op = filter_opstack_pop(ps);
1310 postfix_append_op(ps, top_op);
1311 continue;
1313 break;
1316 filter_opstack_push(ps, op);
1317 continue;
1320 if (ch == '(') {
1321 filter_opstack_push(ps, OP_OPEN_PAREN);
1322 continue;
1325 if (ch == ')') {
1326 if (strlen(curr_operand(ps))) {
1327 postfix_append_operand(ps, curr_operand(ps));
1328 clear_operand_string(ps);
1331 top_op = filter_opstack_pop(ps);
1332 while (top_op != OP_NONE) {
1333 if (top_op == OP_OPEN_PAREN)
1334 break;
1335 postfix_append_op(ps, top_op);
1336 top_op = filter_opstack_pop(ps);
1338 if (top_op == OP_NONE) {
1339 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1340 return -EINVAL;
1342 continue;
1344 parse_operand:
1345 if (append_operand_char(ps, ch)) {
1346 parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1347 return -EINVAL;
1351 if (strlen(curr_operand(ps)))
1352 postfix_append_operand(ps, curr_operand(ps));
1354 while (!filter_opstack_empty(ps)) {
1355 top_op = filter_opstack_pop(ps);
1356 if (top_op == OP_NONE)
1357 break;
1358 if (top_op == OP_OPEN_PAREN) {
1359 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1360 return -EINVAL;
1362 postfix_append_op(ps, top_op);
1365 return 0;
1368 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1369 struct ftrace_event_call *call,
1370 int op, char *operand1, char *operand2)
1372 struct ftrace_event_field *field;
1373 static struct filter_pred pred;
1375 memset(&pred, 0, sizeof(pred));
1376 pred.op = op;
1378 if (op == OP_AND || op == OP_OR)
1379 return &pred;
1381 if (!operand1 || !operand2) {
1382 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1383 return NULL;
1386 field = trace_find_event_field(call, operand1);
1387 if (!field) {
1388 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1389 return NULL;
1392 strcpy(pred.regex.pattern, operand2);
1393 pred.regex.len = strlen(pred.regex.pattern);
1394 pred.field = field;
1395 return init_pred(ps, field, &pred) ? NULL : &pred;
1398 static int check_preds(struct filter_parse_state *ps)
1400 int n_normal_preds = 0, n_logical_preds = 0;
1401 struct postfix_elt *elt;
1403 list_for_each_entry(elt, &ps->postfix, list) {
1404 if (elt->op == OP_NONE)
1405 continue;
1407 if (elt->op == OP_AND || elt->op == OP_OR) {
1408 n_logical_preds++;
1409 continue;
1411 n_normal_preds++;
1414 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1415 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1416 return -EINVAL;
1419 return 0;
1422 static int count_preds(struct filter_parse_state *ps)
1424 struct postfix_elt *elt;
1425 int n_preds = 0;
1427 list_for_each_entry(elt, &ps->postfix, list) {
1428 if (elt->op == OP_NONE)
1429 continue;
1430 n_preds++;
1433 return n_preds;
1436 struct check_pred_data {
1437 int count;
1438 int max;
1441 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1442 int *err, void *data)
1444 struct check_pred_data *d = data;
1446 if (WARN_ON(d->count++ > d->max)) {
1447 *err = -EINVAL;
1448 return WALK_PRED_ABORT;
1450 return WALK_PRED_DEFAULT;
1454 * The tree is walked at filtering of an event. If the tree is not correctly
1455 * built, it may cause an infinite loop. Check here that the tree does
1456 * indeed terminate.
1458 static int check_pred_tree(struct event_filter *filter,
1459 struct filter_pred *root)
1461 struct check_pred_data data = {
1463 * The max that we can hit a node is three times.
1464 * Once going down, once coming up from left, and
1465 * once coming up from right. This is more than enough
1466 * since leafs are only hit a single time.
1468 .max = 3 * filter->n_preds,
1469 .count = 0,
1472 return walk_pred_tree(filter->preds, root,
1473 check_pred_tree_cb, &data);
1476 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1477 int *err, void *data)
1479 int *count = data;
1481 if ((move == MOVE_DOWN) &&
1482 (pred->left == FILTER_PRED_INVALID))
1483 (*count)++;
1485 return WALK_PRED_DEFAULT;
1488 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1490 int count = 0, ret;
1492 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1493 WARN_ON(ret);
1494 return count;
1497 struct fold_pred_data {
1498 struct filter_pred *root;
1499 int count;
1500 int children;
1503 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1504 int *err, void *data)
1506 struct fold_pred_data *d = data;
1507 struct filter_pred *root = d->root;
1509 if (move != MOVE_DOWN)
1510 return WALK_PRED_DEFAULT;
1511 if (pred->left != FILTER_PRED_INVALID)
1512 return WALK_PRED_DEFAULT;
1514 if (WARN_ON(d->count == d->children)) {
1515 *err = -EINVAL;
1516 return WALK_PRED_ABORT;
1519 pred->index &= ~FILTER_PRED_FOLD;
1520 root->ops[d->count++] = pred->index;
1521 return WALK_PRED_DEFAULT;
1524 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1526 struct fold_pred_data data = {
1527 .root = root,
1528 .count = 0,
1530 int children;
1532 /* No need to keep the fold flag */
1533 root->index &= ~FILTER_PRED_FOLD;
1535 /* If the root is a leaf then do nothing */
1536 if (root->left == FILTER_PRED_INVALID)
1537 return 0;
1539 /* count the children */
1540 children = count_leafs(preds, &preds[root->left]);
1541 children += count_leafs(preds, &preds[root->right]);
1543 root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
1544 if (!root->ops)
1545 return -ENOMEM;
1547 root->val = children;
1548 data.children = children;
1549 return walk_pred_tree(preds, root, fold_pred_cb, &data);
1552 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1553 int *err, void *data)
1555 struct filter_pred *preds = data;
1557 if (move != MOVE_DOWN)
1558 return WALK_PRED_DEFAULT;
1559 if (!(pred->index & FILTER_PRED_FOLD))
1560 return WALK_PRED_DEFAULT;
1562 *err = fold_pred(preds, pred);
1563 if (*err)
1564 return WALK_PRED_ABORT;
1566 /* eveyrhing below is folded, continue with parent */
1567 return WALK_PRED_PARENT;
1571 * To optimize the processing of the ops, if we have several "ors" or
1572 * "ands" together, we can put them in an array and process them all
1573 * together speeding up the filter logic.
1575 static int fold_pred_tree(struct event_filter *filter,
1576 struct filter_pred *root)
1578 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1579 filter->preds);
1582 static int replace_preds(struct ftrace_event_call *call,
1583 struct event_filter *filter,
1584 struct filter_parse_state *ps,
1585 char *filter_string,
1586 bool dry_run)
1588 char *operand1 = NULL, *operand2 = NULL;
1589 struct filter_pred *pred;
1590 struct filter_pred *root;
1591 struct postfix_elt *elt;
1592 struct pred_stack stack = { }; /* init to NULL */
1593 int err;
1594 int n_preds = 0;
1596 n_preds = count_preds(ps);
1597 if (n_preds >= MAX_FILTER_PRED) {
1598 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1599 return -ENOSPC;
1602 err = check_preds(ps);
1603 if (err)
1604 return err;
1606 if (!dry_run) {
1607 err = __alloc_pred_stack(&stack, n_preds);
1608 if (err)
1609 return err;
1610 err = __alloc_preds(filter, n_preds);
1611 if (err)
1612 goto fail;
1615 n_preds = 0;
1616 list_for_each_entry(elt, &ps->postfix, list) {
1617 if (elt->op == OP_NONE) {
1618 if (!operand1)
1619 operand1 = elt->operand;
1620 else if (!operand2)
1621 operand2 = elt->operand;
1622 else {
1623 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1624 err = -EINVAL;
1625 goto fail;
1627 continue;
1630 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1631 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1632 err = -ENOSPC;
1633 goto fail;
1636 pred = create_pred(ps, call, elt->op, operand1, operand2);
1637 if (!pred) {
1638 err = -EINVAL;
1639 goto fail;
1642 if (!dry_run) {
1643 err = filter_add_pred(ps, filter, pred, &stack);
1644 if (err)
1645 goto fail;
1648 operand1 = operand2 = NULL;
1651 if (!dry_run) {
1652 /* We should have one item left on the stack */
1653 pred = __pop_pred_stack(&stack);
1654 if (!pred)
1655 return -EINVAL;
1656 /* This item is where we start from in matching */
1657 root = pred;
1658 /* Make sure the stack is empty */
1659 pred = __pop_pred_stack(&stack);
1660 if (WARN_ON(pred)) {
1661 err = -EINVAL;
1662 filter->root = NULL;
1663 goto fail;
1665 err = check_pred_tree(filter, root);
1666 if (err)
1667 goto fail;
1669 /* Optimize the tree */
1670 err = fold_pred_tree(filter, root);
1671 if (err)
1672 goto fail;
1674 /* We don't set root until we know it works */
1675 barrier();
1676 filter->root = root;
1679 err = 0;
1680 fail:
1681 __free_pred_stack(&stack);
1682 return err;
1685 static inline void event_set_filtered_flag(struct ftrace_event_file *file)
1687 struct ftrace_event_call *call = file->event_call;
1689 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1690 call->flags |= TRACE_EVENT_FL_FILTERED;
1691 else
1692 file->flags |= FTRACE_EVENT_FL_FILTERED;
1695 static inline void event_set_filter(struct ftrace_event_file *file,
1696 struct event_filter *filter)
1698 struct ftrace_event_call *call = file->event_call;
1700 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1701 rcu_assign_pointer(call->filter, filter);
1702 else
1703 rcu_assign_pointer(file->filter, filter);
1706 static inline void event_clear_filter(struct ftrace_event_file *file)
1708 struct ftrace_event_call *call = file->event_call;
1710 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1711 RCU_INIT_POINTER(call->filter, NULL);
1712 else
1713 RCU_INIT_POINTER(file->filter, NULL);
1716 static inline void
1717 event_set_no_set_filter_flag(struct ftrace_event_file *file)
1719 struct ftrace_event_call *call = file->event_call;
1721 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1722 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1723 else
1724 file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
1727 static inline void
1728 event_clear_no_set_filter_flag(struct ftrace_event_file *file)
1730 struct ftrace_event_call *call = file->event_call;
1732 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1733 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1734 else
1735 file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
1738 static inline bool
1739 event_no_set_filter_flag(struct ftrace_event_file *file)
1741 struct ftrace_event_call *call = file->event_call;
1743 if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
1744 return true;
1746 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1747 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1748 return true;
1750 return false;
1753 struct filter_list {
1754 struct list_head list;
1755 struct event_filter *filter;
1758 static int replace_system_preds(struct event_subsystem *system,
1759 struct trace_array *tr,
1760 struct filter_parse_state *ps,
1761 char *filter_string)
1763 struct ftrace_event_file *file;
1764 struct ftrace_event_call *call;
1765 struct filter_list *filter_item;
1766 struct filter_list *tmp;
1767 LIST_HEAD(filter_list);
1768 bool fail = true;
1769 int err;
1771 list_for_each_entry(file, &tr->events, list) {
1772 call = file->event_call;
1773 if (strcmp(call->class->system, system->name) != 0)
1774 continue;
1777 * Try to see if the filter can be applied
1778 * (filter arg is ignored on dry_run)
1780 err = replace_preds(call, NULL, ps, filter_string, true);
1781 if (err)
1782 event_set_no_set_filter_flag(file);
1783 else
1784 event_clear_no_set_filter_flag(file);
1787 list_for_each_entry(file, &tr->events, list) {
1788 struct event_filter *filter;
1790 call = file->event_call;
1792 if (strcmp(call->class->system, system->name) != 0)
1793 continue;
1795 if (event_no_set_filter_flag(file))
1796 continue;
1798 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1799 if (!filter_item)
1800 goto fail_mem;
1802 list_add_tail(&filter_item->list, &filter_list);
1804 filter_item->filter = __alloc_filter();
1805 if (!filter_item->filter)
1806 goto fail_mem;
1807 filter = filter_item->filter;
1809 /* Can only fail on no memory */
1810 err = replace_filter_string(filter, filter_string);
1811 if (err)
1812 goto fail_mem;
1814 err = replace_preds(call, filter, ps, filter_string, false);
1815 if (err) {
1816 filter_disable(file);
1817 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1818 append_filter_err(ps, filter);
1819 } else
1820 event_set_filtered_flag(file);
1822 * Regardless of if this returned an error, we still
1823 * replace the filter for the call.
1825 filter = event_filter(file);
1826 event_set_filter(file, filter_item->filter);
1827 filter_item->filter = filter;
1829 fail = false;
1832 if (fail)
1833 goto fail;
1836 * The calls can still be using the old filters.
1837 * Do a synchronize_sched() to ensure all calls are
1838 * done with them before we free them.
1840 synchronize_sched();
1841 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1842 __free_filter(filter_item->filter);
1843 list_del(&filter_item->list);
1844 kfree(filter_item);
1846 return 0;
1847 fail:
1848 /* No call succeeded */
1849 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1850 list_del(&filter_item->list);
1851 kfree(filter_item);
1853 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1854 return -EINVAL;
1855 fail_mem:
1856 /* If any call succeeded, we still need to sync */
1857 if (!fail)
1858 synchronize_sched();
1859 list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1860 __free_filter(filter_item->filter);
1861 list_del(&filter_item->list);
1862 kfree(filter_item);
1864 return -ENOMEM;
1867 static int create_filter_start(char *filter_str, bool set_str,
1868 struct filter_parse_state **psp,
1869 struct event_filter **filterp)
1871 struct event_filter *filter;
1872 struct filter_parse_state *ps = NULL;
1873 int err = 0;
1875 WARN_ON_ONCE(*psp || *filterp);
1877 /* allocate everything, and if any fails, free all and fail */
1878 filter = __alloc_filter();
1879 if (filter && set_str)
1880 err = replace_filter_string(filter, filter_str);
1882 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1884 if (!filter || !ps || err) {
1885 kfree(ps);
1886 __free_filter(filter);
1887 return -ENOMEM;
1890 /* we're committed to creating a new filter */
1891 *filterp = filter;
1892 *psp = ps;
1894 parse_init(ps, filter_ops, filter_str);
1895 err = filter_parse(ps);
1896 if (err && set_str)
1897 append_filter_err(ps, filter);
1898 return err;
1901 static void create_filter_finish(struct filter_parse_state *ps)
1903 if (ps) {
1904 filter_opstack_clear(ps);
1905 postfix_clear(ps);
1906 kfree(ps);
1911 * create_filter - create a filter for a ftrace_event_call
1912 * @call: ftrace_event_call to create a filter for
1913 * @filter_str: filter string
1914 * @set_str: remember @filter_str and enable detailed error in filter
1915 * @filterp: out param for created filter (always updated on return)
1917 * Creates a filter for @call with @filter_str. If @set_str is %true,
1918 * @filter_str is copied and recorded in the new filter.
1920 * On success, returns 0 and *@filterp points to the new filter. On
1921 * failure, returns -errno and *@filterp may point to %NULL or to a new
1922 * filter. In the latter case, the returned filter contains error
1923 * information if @set_str is %true and the caller is responsible for
1924 * freeing it.
1926 static int create_filter(struct ftrace_event_call *call,
1927 char *filter_str, bool set_str,
1928 struct event_filter **filterp)
1930 struct event_filter *filter = NULL;
1931 struct filter_parse_state *ps = NULL;
1932 int err;
1934 err = create_filter_start(filter_str, set_str, &ps, &filter);
1935 if (!err) {
1936 err = replace_preds(call, filter, ps, filter_str, false);
1937 if (err && set_str)
1938 append_filter_err(ps, filter);
1940 create_filter_finish(ps);
1942 *filterp = filter;
1943 return err;
1946 int create_event_filter(struct ftrace_event_call *call,
1947 char *filter_str, bool set_str,
1948 struct event_filter **filterp)
1950 return create_filter(call, filter_str, set_str, filterp);
1954 * create_system_filter - create a filter for an event_subsystem
1955 * @system: event_subsystem to create a filter for
1956 * @filter_str: filter string
1957 * @filterp: out param for created filter (always updated on return)
1959 * Identical to create_filter() except that it creates a subsystem filter
1960 * and always remembers @filter_str.
1962 static int create_system_filter(struct event_subsystem *system,
1963 struct trace_array *tr,
1964 char *filter_str, struct event_filter **filterp)
1966 struct event_filter *filter = NULL;
1967 struct filter_parse_state *ps = NULL;
1968 int err;
1970 err = create_filter_start(filter_str, true, &ps, &filter);
1971 if (!err) {
1972 err = replace_system_preds(system, tr, ps, filter_str);
1973 if (!err) {
1974 /* System filters just show a default message */
1975 kfree(filter->filter_string);
1976 filter->filter_string = NULL;
1977 } else {
1978 append_filter_err(ps, filter);
1981 create_filter_finish(ps);
1983 *filterp = filter;
1984 return err;
1987 /* caller must hold event_mutex */
1988 int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
1990 struct ftrace_event_call *call = file->event_call;
1991 struct event_filter *filter;
1992 int err;
1994 if (!strcmp(strstrip(filter_string), "0")) {
1995 filter_disable(file);
1996 filter = event_filter(file);
1998 if (!filter)
1999 return 0;
2001 event_clear_filter(file);
2003 /* Make sure the filter is not being used */
2004 synchronize_sched();
2005 __free_filter(filter);
2007 return 0;
2010 err = create_filter(call, filter_string, true, &filter);
2013 * Always swap the call filter with the new filter
2014 * even if there was an error. If there was an error
2015 * in the filter, we disable the filter and show the error
2016 * string
2018 if (filter) {
2019 struct event_filter *tmp;
2021 tmp = event_filter(file);
2022 if (!err)
2023 event_set_filtered_flag(file);
2024 else
2025 filter_disable(file);
2027 event_set_filter(file, filter);
2029 if (tmp) {
2030 /* Make sure the call is done with the filter */
2031 synchronize_sched();
2032 __free_filter(tmp);
2036 return err;
2039 int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
2040 char *filter_string)
2042 struct event_subsystem *system = dir->subsystem;
2043 struct trace_array *tr = dir->tr;
2044 struct event_filter *filter;
2045 int err = 0;
2047 mutex_lock(&event_mutex);
2049 /* Make sure the system still has events */
2050 if (!dir->nr_events) {
2051 err = -ENODEV;
2052 goto out_unlock;
2055 if (!strcmp(strstrip(filter_string), "0")) {
2056 filter_free_subsystem_preds(system, tr);
2057 remove_filter_string(system->filter);
2058 filter = system->filter;
2059 system->filter = NULL;
2060 /* Ensure all filters are no longer used */
2061 synchronize_sched();
2062 filter_free_subsystem_filters(system, tr);
2063 __free_filter(filter);
2064 goto out_unlock;
2067 err = create_system_filter(system, tr, filter_string, &filter);
2068 if (filter) {
2070 * No event actually uses the system filter
2071 * we can free it without synchronize_sched().
2073 __free_filter(system->filter);
2074 system->filter = filter;
2076 out_unlock:
2077 mutex_unlock(&event_mutex);
2079 return err;
2082 #ifdef CONFIG_PERF_EVENTS
2084 void ftrace_profile_free_filter(struct perf_event *event)
2086 struct event_filter *filter = event->filter;
2088 event->filter = NULL;
2089 __free_filter(filter);
2092 struct function_filter_data {
2093 struct ftrace_ops *ops;
2094 int first_filter;
2095 int first_notrace;
2098 #ifdef CONFIG_FUNCTION_TRACER
2099 static char **
2100 ftrace_function_filter_re(char *buf, int len, int *count)
2102 char *str, *sep, **re;
2104 str = kstrndup(buf, len, GFP_KERNEL);
2105 if (!str)
2106 return NULL;
2109 * The argv_split function takes white space
2110 * as a separator, so convert ',' into spaces.
2112 while ((sep = strchr(str, ',')))
2113 *sep = ' ';
2115 re = argv_split(GFP_KERNEL, str, count);
2116 kfree(str);
2117 return re;
2120 static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
2121 int reset, char *re, int len)
2123 int ret;
2125 if (filter)
2126 ret = ftrace_set_filter(ops, re, len, reset);
2127 else
2128 ret = ftrace_set_notrace(ops, re, len, reset);
2130 return ret;
2133 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2134 struct function_filter_data *data)
2136 int i, re_cnt, ret = -EINVAL;
2137 int *reset;
2138 char **re;
2140 reset = filter ? &data->first_filter : &data->first_notrace;
2143 * The 'ip' field could have multiple filters set, separated
2144 * either by space or comma. We first cut the filter and apply
2145 * all pieces separatelly.
2147 re = ftrace_function_filter_re(buf, len, &re_cnt);
2148 if (!re)
2149 return -EINVAL;
2151 for (i = 0; i < re_cnt; i++) {
2152 ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2153 re[i], strlen(re[i]));
2154 if (ret)
2155 break;
2157 if (*reset)
2158 *reset = 0;
2161 argv_free(re);
2162 return ret;
2165 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2167 struct ftrace_event_field *field = pred->field;
2169 if (leaf) {
2171 * Check the leaf predicate for function trace, verify:
2172 * - only '==' and '!=' is used
2173 * - the 'ip' field is used
2175 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2176 return -EINVAL;
2178 if (strcmp(field->name, "ip"))
2179 return -EINVAL;
2180 } else {
2182 * Check the non leaf predicate for function trace, verify:
2183 * - only '||' is used
2185 if (pred->op != OP_OR)
2186 return -EINVAL;
2189 return 0;
2192 static int ftrace_function_set_filter_cb(enum move_type move,
2193 struct filter_pred *pred,
2194 int *err, void *data)
2196 /* Checking the node is valid for function trace. */
2197 if ((move != MOVE_DOWN) ||
2198 (pred->left != FILTER_PRED_INVALID)) {
2199 *err = ftrace_function_check_pred(pred, 0);
2200 } else {
2201 *err = ftrace_function_check_pred(pred, 1);
2202 if (*err)
2203 return WALK_PRED_ABORT;
2205 *err = __ftrace_function_set_filter(pred->op == OP_EQ,
2206 pred->regex.pattern,
2207 pred->regex.len,
2208 data);
2211 return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
2214 static int ftrace_function_set_filter(struct perf_event *event,
2215 struct event_filter *filter)
2217 struct function_filter_data data = {
2218 .first_filter = 1,
2219 .first_notrace = 1,
2220 .ops = &event->ftrace_ops,
2223 return walk_pred_tree(filter->preds, filter->root,
2224 ftrace_function_set_filter_cb, &data);
2226 #else
2227 static int ftrace_function_set_filter(struct perf_event *event,
2228 struct event_filter *filter)
2230 return -ENODEV;
2232 #endif /* CONFIG_FUNCTION_TRACER */
2234 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
2235 char *filter_str)
2237 int err;
2238 struct event_filter *filter;
2239 struct ftrace_event_call *call;
2241 mutex_lock(&event_mutex);
2243 call = event->tp_event;
2245 err = -EINVAL;
2246 if (!call)
2247 goto out_unlock;
2249 err = -EEXIST;
2250 if (event->filter)
2251 goto out_unlock;
2253 err = create_filter(call, filter_str, false, &filter);
2254 if (err)
2255 goto free_filter;
2257 if (ftrace_event_is_function(call))
2258 err = ftrace_function_set_filter(event, filter);
2259 else
2260 event->filter = filter;
2262 free_filter:
2263 if (err || ftrace_event_is_function(call))
2264 __free_filter(filter);
2266 out_unlock:
2267 mutex_unlock(&event_mutex);
2269 return err;
2272 #endif /* CONFIG_PERF_EVENTS */
2274 #ifdef CONFIG_FTRACE_STARTUP_TEST
2276 #include <linux/types.h>
2277 #include <linux/tracepoint.h>
2279 #define CREATE_TRACE_POINTS
2280 #include "trace_events_filter_test.h"
2282 #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
2284 .filter = FILTER, \
2285 .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
2286 .e = ve, .f = vf, .g = vg, .h = vh }, \
2287 .match = m, \
2288 .not_visited = nvisit, \
2290 #define YES 1
2291 #define NO 0
2293 static struct test_filter_data_t {
2294 char *filter;
2295 struct ftrace_raw_ftrace_test_filter rec;
2296 int match;
2297 char *not_visited;
2298 } test_filter_data[] = {
2299 #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
2300 "e == 1 && f == 1 && g == 1 && h == 1"
2301 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
2302 DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
2303 DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
2304 #undef FILTER
2305 #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
2306 "e == 1 || f == 1 || g == 1 || h == 1"
2307 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2308 DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2309 DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
2310 #undef FILTER
2311 #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
2312 "(e == 1 || f == 1) && (g == 1 || h == 1)"
2313 DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
2314 DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2315 DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
2316 DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
2317 #undef FILTER
2318 #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
2319 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2320 DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
2321 DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
2322 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2323 #undef FILTER
2324 #define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
2325 "(e == 1 && f == 1) || (g == 1 && h == 1)"
2326 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
2327 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2328 DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
2329 #undef FILTER
2330 #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
2331 "(e == 1 || f == 1)) && (g == 1 || h == 1)"
2332 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
2333 DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
2334 DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
2335 #undef FILTER
2336 #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
2337 "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
2338 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
2339 DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2340 DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
2341 #undef FILTER
2342 #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
2343 "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
2344 DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
2345 DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2346 DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
2349 #undef DATA_REC
2350 #undef FILTER
2351 #undef YES
2352 #undef NO
2354 #define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2356 static int test_pred_visited;
2358 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2360 struct ftrace_event_field *field = pred->field;
2362 test_pred_visited = 1;
2363 printk(KERN_INFO "\npred visited %s\n", field->name);
2364 return 1;
2367 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2368 int *err, void *data)
2370 char *fields = data;
2372 if ((move == MOVE_DOWN) &&
2373 (pred->left == FILTER_PRED_INVALID)) {
2374 struct ftrace_event_field *field = pred->field;
2376 if (!field) {
2377 WARN(1, "all leafs should have field defined");
2378 return WALK_PRED_DEFAULT;
2380 if (!strchr(fields, *field->name))
2381 return WALK_PRED_DEFAULT;
2383 WARN_ON(!pred->fn);
2384 pred->fn = test_pred_visited_fn;
2386 return WALK_PRED_DEFAULT;
2389 static __init int ftrace_test_event_filter(void)
2391 int i;
2393 printk(KERN_INFO "Testing ftrace filter: ");
2395 for (i = 0; i < DATA_CNT; i++) {
2396 struct event_filter *filter = NULL;
2397 struct test_filter_data_t *d = &test_filter_data[i];
2398 int err;
2400 err = create_filter(&event_ftrace_test_filter, d->filter,
2401 false, &filter);
2402 if (err) {
2403 printk(KERN_INFO
2404 "Failed to get filter for '%s', err %d\n",
2405 d->filter, err);
2406 __free_filter(filter);
2407 break;
2411 * The preemption disabling is not really needed for self
2412 * tests, but the rcu dereference will complain without it.
2414 preempt_disable();
2415 if (*d->not_visited)
2416 walk_pred_tree(filter->preds, filter->root,
2417 test_walk_pred_cb,
2418 d->not_visited);
2420 test_pred_visited = 0;
2421 err = filter_match_preds(filter, &d->rec);
2422 preempt_enable();
2424 __free_filter(filter);
2426 if (test_pred_visited) {
2427 printk(KERN_INFO
2428 "Failed, unwanted pred visited for filter %s\n",
2429 d->filter);
2430 break;
2433 if (err != d->match) {
2434 printk(KERN_INFO
2435 "Failed to match filter '%s', expected %d\n",
2436 d->filter, d->match);
2437 break;
2441 if (i == DATA_CNT)
2442 printk(KERN_CONT "OK\n");
2444 return 0;
2447 late_initcall(ftrace_test_event_filter);
2449 #endif /* CONFIG_FTRACE_STARTUP_TEST */