drm/i915: rip out the PM_IIR WARN
[linux-2.6/libata-dev.git] / kernel / trace / trace_events_filter.c
blob 431dba8b754214ee06b4d7163dc3abd91b2718b5
1 /*
2 * trace_events_filter - generic event filtering
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
27 #include "trace.h"
28 #include "trace_output.h"
/* Text shown when reading a subsystem filter file that has no filter set. */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
/*
 * Operator identifiers. The ordering matters: each OP_* value indexes
 * the matching entry of filter_ops[] below.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_NONE,
	OP_OPEN_PAREN,
};

/* One entry of the operator table: id, textual form, binding strength. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};
/*
 * Operator table, indexed by enum filter_op_ids. Higher precedence
 * binds tighter; OP_NONE and OP_OPEN_PAREN act as sentinels for the
 * operator-stack scan in filter_parse().
 */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
/* Parse-error codes; each indexes its message in err_text[] below. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
};

/* Human-readable messages, kept in sync with the FILT_ERR_* enum order. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
};
/* An operator pushed on the shunting-yard operator stack. */
struct opstack_op {
	int op;
	struct list_head list;
};

/* One element of the postfix (RPN) output: either an op or an operand. */
struct postfix_elt {
	int op;			/* OP_NONE when this element is an operand */
	char *operand;
	struct list_head list;
};

/* All state used while converting an infix filter string to postfix. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* pending operators */
	struct list_head postfix;	/* produced RPN sequence */
	int lasterr;			/* FILT_ERR_* of last failure */
	int lasterr_pos;		/* position of the error in the input */

	struct {
		char *string;		/* the raw infix input */
		unsigned int cnt;	/* characters remaining */
		unsigned int tail;	/* next character to read */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];	/* operand scratch buffer */
		int pos;
		unsigned int tail;	/* next free slot in string[] */
	} operand;
};

/* Fixed-size stack of predicate pointers used while building the tree. */
struct pred_stack {
	struct filter_pred **preds;
	int index;		/* grows downward; 0 means full */
};
/*
 * Generate a relational predicate (<, <=, >, >=) for one integer type.
 * The field's bytes are read straight out of the event record at
 * pred->offset and compared against the pre-parsed pred->val.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}
/*
 * Generate an equality predicate for one field width. pred->not flips
 * the result, which is how both == and != share one function.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
/* Instantiate comparison predicates for every signed/unsigned width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* Equality predicates only need the width, not the signedness. */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
192 /* Filter predicate for fixed sized arrays of characters */
193 static int filter_pred_string(struct filter_pred *pred, void *event)
195 char *addr = (char *)(event + pred->offset);
196 int cmp, match;
198 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
200 match = cmp ^ pred->not;
202 return match;
205 /* Filter predicate for char * pointers */
206 static int filter_pred_pchar(struct filter_pred *pred, void *event)
208 char **addr = (char **)(event + pred->offset);
209 int cmp, match;
210 int len = strlen(*addr) + 1; /* including tailing '\0' */
212 cmp = pred->regex.match(*addr, &pred->regex, len);
214 match = cmp ^ pred->not;
216 return match;
220 * Filter predicate for dynamic sized arrays of characters.
221 * These are implemented through a list of strings at the end
222 * of the entry.
223 * Also each of these strings have a field in the entry which
224 * contains its offset from the beginning of the entry.
225 * We have then first to get this field, dereference it
226 * and add it to the address of the entry, and at last we have
227 * the address of the string.
229 static int filter_pred_strloc(struct filter_pred *pred, void *event)
231 u32 str_item = *(u32 *)(event + pred->offset);
232 int str_loc = str_item & 0xffff;
233 int str_len = str_item >> 16;
234 char *addr = (char *)(event + str_loc);
235 int cmp, match;
237 cmp = pred->regex.match(addr, &pred->regex, str_len);
239 match = cmp ^ pred->not;
241 return match;
/* Default predicate: matches nothing. Installed on freshly allocated preds. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
250 * regex_match_foo - Basic regex callbacks
252 * @str: the string to be searched
253 * @r: the regex structure containing the pattern string
254 * @len: the length of the string to be searched (including '\0')
256 * Note:
257 * - @str might not be NULL-terminated if it's of type DYN_STRING
258 * or STATIC_STRING
261 static int regex_match_full(char *str, struct regex *r, int len)
263 if (strncmp(str, r->pattern, len) == 0)
264 return 1;
265 return 0;
268 static int regex_match_front(char *str, struct regex *r, int len)
270 if (strncmp(str, r->pattern, r->len) == 0)
271 return 1;
272 return 0;
275 static int regex_match_middle(char *str, struct regex *r, int len)
277 if (strnstr(str, r->pattern, len))
278 return 1;
279 return 0;
282 static int regex_match_end(char *str, struct regex *r, int len)
284 int strlen = len - 1;
286 if (strlen >= r->len &&
287 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
288 return 1;
289 return 0;
/**
 * filter_parse_regex - parse a basic regex
 * @buff: the raw regex
 * @len: length of the regex
 * @search: will point to the beginning of the string to compare
 * @not: tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' inverts the match and is stripped from the pattern */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match against the tail */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/* '*' after a leading one: pattern is "*foo*" */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				/* terminate the pattern at the '*' */
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
/*
 * Pick the regex callback for a string predicate. Only the '~' (glob)
 * operator actually parses the pattern; == and != use a full compare.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* strip any leading '!'/'*' by sliding the pattern down */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* a '!' in the pattern toggles the predicate's own negation */
	pred->not ^= not;
}
/* Direction of the current step while walking the predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning after the left subtree */
	MOVE_UP_FROM_RIGHT	/* returning after the right subtree */
};
379 static struct filter_pred *
380 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
381 int index, enum move_type *move)
383 if (pred->parent & FILTER_PRED_IS_RIGHT)
384 *move = MOVE_UP_FROM_RIGHT;
385 else
386 *move = MOVE_UP_FROM_LEFT;
387 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
389 return pred;
/* Return codes a walk callback may give back to walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, propagate *err */
	WALK_PRED_PARENT,	/* skip this subtree, go to the parent */
	WALK_PRED_DEFAULT,	/* continue normally */
};

/* Callback invoked once per tree step; may set *err before aborting. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
/*
 * Iteratively walk the predicate tree rooted at @root, calling @cb on
 * every step (down, up-from-left, up-from-right). No recursion and no
 * extra stack: parent links stored in each pred drive the traversal.
 * Returns 0 on a complete walk, or the *err set by an aborting callback.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* leaf nodes have left == FILTER_PRED_INVALID */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* left subtree done; descend into the right one */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
450 * A series of AND or ORs where found together. Instead of
451 * climbing up and down the tree branches, an array of the
452 * ops were made in order of checks. We can just move across
453 * the array and short circuit if needed.
455 static int process_ops(struct filter_pred *preds,
456 struct filter_pred *op, void *rec)
458 struct filter_pred *pred;
459 int match = 0;
460 int type;
461 int i;
464 * Micro-optimization: We set type to true if op
465 * is an OR and false otherwise (AND). Then we
466 * just need to test if the match is equal to
467 * the type, and if it is, we can short circuit the
468 * rest of the checks:
470 * if ((match && op->op == OP_OR) ||
471 * (!match && op->op == OP_AND))
472 * return match;
474 type = op->op == OP_OR;
476 for (i = 0; i < op->val; i++) {
477 pred = &preds[op->ops[i]];
478 if (!WARN_ON_ONCE(!pred->fn))
479 match = pred->fn(pred, rec);
480 if (!!match == type)
481 return match;
483 return match;
/* Shared state threaded through the filter_match_preds() tree walk. */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running match result */
	void *rec;			/* the event record being tested */
};
/*
 * Walk callback that evaluates the predicate tree against d->rec,
 * short-circuiting AND/OR subtrees where the result is already decided.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}
		/* evaluated as a leaf: don't descend, go back up */
		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* the tree was validated at filter-set time, so this can't fail */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
/* Record a parse error (FILT_ERR_*) and its input position for reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
572 static void remove_filter_string(struct event_filter *filter)
574 if (!filter)
575 return;
577 kfree(filter->filter_string);
578 filter->filter_string = NULL;
581 static int replace_filter_string(struct event_filter *filter,
582 char *filter_string)
584 kfree(filter->filter_string);
585 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
586 if (!filter->filter_string)
587 return -ENOMEM;
589 return 0;
592 static int append_filter_string(struct event_filter *filter,
593 char *string)
595 int newlen;
596 char *new_filter_string;
598 BUG_ON(!filter->filter_string);
599 newlen = strlen(filter->filter_string) + strlen(string) + 1;
600 new_filter_string = kmalloc(newlen, GFP_KERNEL);
601 if (!new_filter_string)
602 return -ENOMEM;
604 strcpy(new_filter_string, filter->filter_string);
605 strcat(new_filter_string, string);
606 kfree(filter->filter_string);
607 filter->filter_string = new_filter_string;
609 return 0;
/*
 * Append a "^\nparse_error: ..." marker to the filter string, with the
 * caret positioned under the offending character of the expression.
 * Best-effort: silently gives up if a scratch page can't be allocated.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	/* pad with spaces so the '^' lines up under position @pos */
	memset(buf, ' ', PAGE_SIZE);
	/* keep room for the message at the end of the page */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
634 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
636 struct event_filter *filter;
638 mutex_lock(&event_mutex);
639 filter = call->filter;
640 if (filter && filter->filter_string)
641 trace_seq_printf(s, "%s\n", filter->filter_string);
642 else
643 trace_seq_printf(s, "none\n");
644 mutex_unlock(&event_mutex);
647 void print_subsystem_event_filter(struct event_subsystem *system,
648 struct trace_seq *s)
650 struct event_filter *filter;
652 mutex_lock(&event_mutex);
653 filter = system->filter;
654 if (filter && filter->filter_string)
655 trace_seq_printf(s, "%s\n", filter->filter_string);
656 else
657 trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
658 mutex_unlock(&event_mutex);
661 static struct ftrace_event_field *
662 __find_event_field(struct list_head *head, char *name)
664 struct ftrace_event_field *field;
666 list_for_each_entry(field, head, link) {
667 if (!strcmp(field->name, name))
668 return field;
671 return NULL;
674 static struct ftrace_event_field *
675 find_event_field(struct ftrace_event_call *call, char *name)
677 struct ftrace_event_field *field;
678 struct list_head *head;
680 field = __find_event_field(&ftrace_common_fields, name);
681 if (field)
682 return field;
684 head = trace_get_fields(call);
685 return __find_event_field(head, name);
/*
 * Allocate a pred stack able to hold @n_preds entries. One extra,
 * zeroed slot acts as a NULL sentinel that __pop_pred_stack() relies on.
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	/* index walks downward; n_preds means "empty" */
	stack->index = n_preds;
	return 0;
}
697 static void __free_pred_stack(struct pred_stack *stack)
699 kfree(stack->preds);
700 stack->index = 0;
703 static int __push_pred_stack(struct pred_stack *stack,
704 struct filter_pred *pred)
706 int index = stack->index;
708 if (WARN_ON(index == 0))
709 return -ENOSPC;
711 stack->preds[--index] = pred;
712 stack->index = index;
713 return 0;
/*
 * Pop the top predicate, or NULL when the stack is empty. The zeroed
 * sentinel slot from __alloc_pred_stack() is what makes the empty case
 * read as NULL; stack->index is only advanced on a successful pop.
 */
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
/*
 * Copy @src into slot @idx of the filter's flat pred array and wire it
 * into the tree: AND/OR nodes pop their two children off @stack and
 * link them via index; everything else becomes a leaf. The new node is
 * then pushed back so its own parent can claim it. FILTER_PRED_FOLD is
 * tracked in the index so same-op runs can later be flattened.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		/* postfix order: right child was pushed last */
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* strip the fold bit: these fields hold real array indexes */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
778 static void __free_preds(struct event_filter *filter)
780 if (filter->preds) {
781 kfree(filter->preds);
782 filter->preds = NULL;
784 filter->a_preds = 0;
785 filter->n_preds = 0;
/* Clear the FILTERED flag so this event's records bypass filtering. */
static void filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
793 static void __free_filter(struct event_filter *filter)
795 if (!filter)
796 return;
798 __free_preds(filter);
799 kfree(filter->filter_string);
800 kfree(filter);
/*
 * Called when destroying the ftrace_event_call.
 * The call is being freed, so we do not need to worry about
 * the call being currently used. This is for module code removing
 * the tracepoints from within it.
 */
void destroy_preds(struct ftrace_event_call *call)
{
	__free_filter(call->filter);
	call->filter = NULL;
}
815 static struct event_filter *__alloc_filter(void)
817 struct event_filter *filter;
819 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
820 return filter;
823 static int __alloc_preds(struct event_filter *filter, int n_preds)
825 struct filter_pred *pred;
826 int i;
828 if (filter->preds)
829 __free_preds(filter);
831 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
833 if (!filter->preds)
834 return -ENOMEM;
836 filter->a_preds = n_preds;
837 filter->n_preds = 0;
839 for (i = 0; i < n_preds; i++) {
840 pred = &filter->preds[i];
841 pred->fn = filter_pred_none;
844 return 0;
847 static void filter_free_subsystem_preds(struct event_subsystem *system)
849 struct ftrace_event_call *call;
851 list_for_each_entry(call, &ftrace_events, list) {
852 if (strcmp(call->class->system, system->name) != 0)
853 continue;
855 filter_disable(call);
856 remove_filter_string(call->filter);
860 static void filter_free_subsystem_filters(struct event_subsystem *system)
862 struct ftrace_event_call *call;
864 list_for_each_entry(call, &ftrace_events, list) {
865 if (strcmp(call->class->system, system->name) != 0)
866 continue;
867 __free_filter(call->filter);
868 call->filter = NULL;
872 static int filter_add_pred(struct filter_parse_state *ps,
873 struct event_filter *filter,
874 struct filter_pred *pred,
875 struct pred_stack *stack)
877 int err;
879 if (WARN_ON(filter->n_preds == filter->a_preds)) {
880 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
881 return -ENOSPC;
884 err = filter_set_pred(filter, filter->n_preds, stack, pred);
885 if (err)
886 return err;
888 filter->n_preds++;
890 return 0;
893 int filter_assign_type(const char *type)
895 if (strstr(type, "__data_loc") && strstr(type, "char"))
896 return FILTER_DYN_STRING;
898 if (strchr(type, '[') && strstr(type, "char"))
899 return FILTER_STATIC_STRING;
901 return FILTER_OTHER;
/* True if the field is the function-trace address field. */
static bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

/* True for any of the three string flavours a field can have. */
static bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}
916 static int is_legal_op(struct ftrace_event_field *field, int op)
918 if (is_string_field(field) &&
919 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
920 return 0;
921 if (!is_string_field(field) && op == OP_GLOB)
922 return 0;
924 return 1;
/*
 * Pick the predicate callback for an integer field: equality tests only
 * depend on the width, relational tests also on signedness. Returns
 * NULL for an unsupported field size (caller reports FILT_ERR_INVALID_OP).
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
/*
 * Finish building a leaf predicate: validate the op against the field
 * type, compile the regex (strings) or parse the value (integers), and
 * select the matching callback. Returns -EINVAL with a parse_error()
 * recorded on any failure.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			/* fixed arrays carry their size in the field */
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* function trace only supports filtering on 'ip' */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		if (field->is_signed)
			ret = strict_strtoll(pred->regex.pattern, 0, &val);
		else
			ret = strict_strtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* != is implemented as a negated == */
	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
1027 static void parse_init(struct filter_parse_state *ps,
1028 struct filter_op *ops,
1029 char *infix_string)
1031 memset(ps, '\0', sizeof(*ps));
1033 ps->infix.string = infix_string;
1034 ps->infix.cnt = strlen(infix_string);
1035 ps->ops = ops;
1037 INIT_LIST_HEAD(&ps->opstack);
1038 INIT_LIST_HEAD(&ps->postfix);
/* Consume and return the next character of the infix input. */
static char infix_next(struct filter_parse_state *ps)
{
	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}

/* Peek at the next character without consuming it; 0 at end of input. */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}

/* Skip the character last returned by infix_peek(). */
static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}
/* True if operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}

/* True if @c can start any operator; the "OP_NONE" row ends the table. */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
1080 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1082 char nextc = infix_peek(ps);
1083 char opstr[3];
1084 int i;
1086 opstr[0] = firstc;
1087 opstr[1] = nextc;
1088 opstr[2] = '\0';
1090 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1091 if (!strcmp(opstr, ps->ops[i].string)) {
1092 infix_advance(ps);
1093 return ps->ops[i].id;
1097 opstr[1] = '\0';
1099 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1100 if (!strcmp(opstr, ps->ops[i].string))
1101 return ps->ops[i].id;
1104 return OP_NONE;
/* Reset the operand scratch buffer to an empty string. */
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}

/* Append one char to the operand buffer; -EINVAL when it would overflow. */
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	/* keep one byte free for the terminating NUL */
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
/* Push operator @op onto the parse-time operator stack. */
static int filter_opstack_push(struct filter_parse_state *ps, int op)
{
	struct opstack_op *opstack_op;

	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
	if (!opstack_op)
		return -ENOMEM;

	opstack_op->op = op;
	/* list_add() prepends, so the list head is the stack top */
	list_add(&opstack_op->list, &ps->opstack);

	return 0;
}

/* Nonzero if the operator stack holds no entries. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}

/* Return the top operator without popping it; OP_NONE when empty. */
static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}

/* Pop and return the top operator; OP_NONE when the stack is empty. */
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	int op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}

/* Discard everything still on the operator stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
/* The operand accumulated so far (empty string when none pending). */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
/* Append a copy of @operand to the postfix output (op set to OP_NONE). */
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = OP_NONE;
	elt->operand = kstrdup(operand, GFP_KERNEL);
	if (!elt->operand) {
		kfree(elt);
		return -ENOMEM;
	}

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Append operator @op to the postfix output (operand left NULL). */
static int postfix_append_op(struct filter_parse_state *ps, int op)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = op;
	elt->operand = NULL;

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}
1218 static void postfix_clear(struct filter_parse_state *ps)
1220 struct postfix_elt *elt;
1222 while (!list_empty(&ps->postfix)) {
1223 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1224 list_del(&elt->list);
1225 kfree(elt->operand);
1226 kfree(elt);
/*
 * Shunting-yard conversion of the infix filter string into the postfix
 * list in ps->postfix. Double quotes toggle literal mode so operator
 * characters inside string values are not interpreted. Returns -EINVAL
 * (with parse_error() recorded) on any malformed input.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* flush any operand collected before this operator */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* pop operators of equal/higher precedence first */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* drain back to the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* flush the final operand, then drain the operator stack */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			/* a '(' left on the stack was never closed */
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
/*
 * Build a predicate for one postfix element. Returns a pointer into a
 * single static scratch pred — callers must copy it (filter_set_pred
 * does) before the next call; presumably safe because filter setup is
 * serialized by event_mutex — TODO confirm. NULL is returned on any
 * parse error (recorded via parse_error()).
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	/* AND/OR nodes carry no field or value */
	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	/* operand2 is bounded by MAX_FILTER_STR_VAL via the parser */
	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1352 static int check_preds(struct filter_parse_state *ps)
1354 int n_normal_preds = 0, n_logical_preds = 0;
1355 struct postfix_elt *elt;
1357 list_for_each_entry(elt, &ps->postfix, list) {
1358 if (elt->op == OP_NONE)
1359 continue;
1361 if (elt->op == OP_AND || elt->op == OP_OR) {
1362 n_logical_preds++;
1363 continue;
1365 n_normal_preds++;
1368 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1369 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1370 return -EINVAL;
1373 return 0;
1376 static int count_preds(struct filter_parse_state *ps)
1378 struct postfix_elt *elt;
1379 int n_preds = 0;
1381 list_for_each_entry(elt, &ps->postfix, list) {
1382 if (elt->op == OP_NONE)
1383 continue;
1384 n_preds++;
1387 return n_preds;
/* Walk-callback state for check_pred_tree(): steps taken vs. allowed. */
struct check_pred_data {
	int count;
	int max;
};
1395 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1396 int *err, void *data)
1398 struct check_pred_data *d = data;
1400 if (WARN_ON(d->count++ > d->max)) {
1401 *err = -EINVAL;
1402 return WALK_PRED_ABORT;
1404 return WALK_PRED_DEFAULT;
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max   = 3 * filter->n_preds,
		.count = 0,
	};

	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
/* Walk callback: bump the counter for each leaf reached on the way down. */
static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
                          int *err, void *data)
{
        int *count = data;

        /* A leaf is a node with no left child (FILTER_PRED_INVALID). */
        if ((move == MOVE_DOWN) &&
            (pred->left == FILTER_PRED_INVALID))
                (*count)++;

        return WALK_PRED_DEFAULT;
}
/* Return the number of leaf predicates in the subtree rooted at @root. */
static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
{
        int count = 0, ret;

        ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
        WARN_ON(ret);   /* the counting callback never sets an error */
        return count;
}
/* State shared with fold_pred_cb() while flattening one folded subtree. */
struct fold_pred_data {
        struct filter_pred *root;       /* node whose ops[] array is being filled */
        int count;                      /* next free slot in root->ops[] */
        int children;                   /* precounted number of leaves (capacity) */
};
/*
 * Walk callback for fold_pred(): record the index of every leaf below
 * the folded root into root->ops[], guarding against writing past the
 * precounted number of children.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
                        int *err, void *data)
{
        struct fold_pred_data *d = data;
        struct filter_pred *root = d->root;

        /* Only act once per node, on the way down. */
        if (move != MOVE_DOWN)
                return WALK_PRED_DEFAULT;
        /* Skip interior nodes; only leaves go into the ops array. */
        if (pred->left != FILTER_PRED_INVALID)
                return WALK_PRED_DEFAULT;

        if (WARN_ON(d->count == d->children)) {
                *err = -EINVAL;
                return WALK_PRED_ABORT;
        }

        /* The fold flag is only a marker; strip it before storing. */
        pred->index &= ~FILTER_PRED_FOLD;
        root->ops[d->count++] = pred->index;
        return WALK_PRED_DEFAULT;
}
/*
 * Flatten the subtree below @root: collect the indices of all its leaf
 * predicates into a newly allocated root->ops[] array so they can be
 * evaluated as a batch at match time. Returns 0 or a negative errno.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
        struct fold_pred_data data = {
                .root = root,
                .count = 0,
        };
        int children;

        /* No need to keep the fold flag */
        root->index &= ~FILTER_PRED_FOLD;

        /* If the root is a leaf then do nothing */
        if (root->left == FILTER_PRED_INVALID)
                return 0;

        /* count the children */
        children = count_leafs(preds, &preds[root->left]);
        children += count_leafs(preds, &preds[root->right]);

        root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
        if (!root->ops)
                return -ENOMEM;

        /* root->val holds the number of batched leaves for the matcher. */
        root->val = children;
        data.children = children;
        return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
/*
 * Walk callback for fold_pred_tree(): when a node marked FILTER_PRED_FOLD
 * is reached on the way down, fold its whole subtree and resume the walk
 * at the parent (nothing below needs individual visits anymore).
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
                             int *err, void *data)
{
        struct filter_pred *preds = data;

        if (move != MOVE_DOWN)
                return WALK_PRED_DEFAULT;
        if (!(pred->index & FILTER_PRED_FOLD))
                return WALK_PRED_DEFAULT;

        *err = fold_pred(preds, pred);
        if (*err)
                return WALK_PRED_ABORT;

        /* everything below is folded, continue with parent */
        return WALK_PRED_PARENT;
}
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 */
static int fold_pred_tree(struct event_filter *filter,
                          struct filter_pred *root)
{
        return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
                              filter->preds);
}
1536 static int replace_preds(struct ftrace_event_call *call,
1537 struct event_filter *filter,
1538 struct filter_parse_state *ps,
1539 char *filter_string,
1540 bool dry_run)
1542 char *operand1 = NULL, *operand2 = NULL;
1543 struct filter_pred *pred;
1544 struct filter_pred *root;
1545 struct postfix_elt *elt;
1546 struct pred_stack stack = { }; /* init to NULL */
1547 int err;
1548 int n_preds = 0;
1550 n_preds = count_preds(ps);
1551 if (n_preds >= MAX_FILTER_PRED) {
1552 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1553 return -ENOSPC;
1556 err = check_preds(ps);
1557 if (err)
1558 return err;
1560 if (!dry_run) {
1561 err = __alloc_pred_stack(&stack, n_preds);
1562 if (err)
1563 return err;
1564 err = __alloc_preds(filter, n_preds);
1565 if (err)
1566 goto fail;
1569 n_preds = 0;
1570 list_for_each_entry(elt, &ps->postfix, list) {
1571 if (elt->op == OP_NONE) {
1572 if (!operand1)
1573 operand1 = elt->operand;
1574 else if (!operand2)
1575 operand2 = elt->operand;
1576 else {
1577 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1578 err = -EINVAL;
1579 goto fail;
1581 continue;
1584 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1585 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1586 err = -ENOSPC;
1587 goto fail;
1590 pred = create_pred(ps, call, elt->op, operand1, operand2);
1591 if (!pred) {
1592 err = -EINVAL;
1593 goto fail;
1596 if (!dry_run) {
1597 err = filter_add_pred(ps, filter, pred, &stack);
1598 if (err)
1599 goto fail;
1602 operand1 = operand2 = NULL;
1605 if (!dry_run) {
1606 /* We should have one item left on the stack */
1607 pred = __pop_pred_stack(&stack);
1608 if (!pred)
1609 return -EINVAL;
1610 /* This item is where we start from in matching */
1611 root = pred;
1612 /* Make sure the stack is empty */
1613 pred = __pop_pred_stack(&stack);
1614 if (WARN_ON(pred)) {
1615 err = -EINVAL;
1616 filter->root = NULL;
1617 goto fail;
1619 err = check_pred_tree(filter, root);
1620 if (err)
1621 goto fail;
1623 /* Optimize the tree */
1624 err = fold_pred_tree(filter, root);
1625 if (err)
1626 goto fail;
1628 /* We don't set root until we know it works */
1629 barrier();
1630 filter->root = root;
1633 err = 0;
1634 fail:
1635 __free_pred_stack(&stack);
1636 return err;
/* List node holding one per-event filter while a subsystem-wide update runs. */
struct filter_list {
        struct list_head list;
        struct event_filter *filter;
};
/*
 * Apply @filter_string to every event in @system.
 *
 * Phase 1 dry-runs the filter against each event and flags those it
 * cannot apply to. Phase 2 builds a real filter per applicable event,
 * swaps it in under RCU, and parks the old one on a local list so all
 * old filters can be freed after a single synchronize_sched().
 * Returns 0 if at least one event took the filter, -EINVAL if none did,
 * or -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct event_subsystem *system,
                                struct filter_parse_state *ps,
                                char *filter_string)
{
        struct ftrace_event_call *call;
        struct filter_list *filter_item;
        struct filter_list *tmp;
        LIST_HEAD(filter_list);
        bool fail = true;
        int err;

        list_for_each_entry(call, &ftrace_events, list) {

                if (strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * Try to see if the filter can be applied
                 * (filter arg is ignored on dry_run)
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
                        call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
                else
                        call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
        }

        list_for_each_entry(call, &ftrace_events, list) {
                struct event_filter *filter;

                if (strcmp(call->class->system, system->name) != 0)
                        continue;

                if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
                        continue;

                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;

                list_add_tail(&filter_item->list, &filter_list);

                filter_item->filter = __alloc_filter();
                if (!filter_item->filter)
                        goto fail_mem;
                filter = filter_item->filter;

                /* Can only fail on no memory */
                err = replace_filter_string(filter, filter_string);
                if (err)
                        goto fail_mem;

                err = replace_preds(call, filter, ps, filter_string, false);
                if (err) {
                        filter_disable(call);
                        parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
                        append_filter_err(ps, filter);
                } else
                        call->flags |= TRACE_EVENT_FL_FILTERED;
                /*
                 * Regardless of if this returned an error, we still
                 * replace the filter for the call.
                 */
                /* The old filter goes onto the list node for later freeing. */
                filter = call->filter;
                rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;

                fail = false;
        }

        if (fail)
                goto fail;

        /*
         * The calls can still be using the old filters.
         * Do a synchronize_sched() to ensure all calls are
         * done with them before we free them.
         */
        synchronize_sched();
        list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
                __free_filter(filter_item->filter);
                list_del(&filter_item->list);
                kfree(filter_item);
        }
        return 0;
fail:
        /* No call succeeded */
        list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
                list_del(&filter_item->list);
                kfree(filter_item);
        }
        parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
        return -EINVAL;
fail_mem:
        /* If any call succeeded, we still need to sync */
        if (!fail)
                synchronize_sched();
        list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
                __free_filter(filter_item->filter);
                list_del(&filter_item->list);
                kfree(filter_item);
        }
        return -ENOMEM;
}
/*
 * Allocate a filter and parse state, optionally record @filter_str in
 * the filter (@set_str), and run the parser. On any return other than
 * the -ENOMEM path, *psp and *filterp are set and the caller must call
 * create_filter_finish(*psp) and eventually free *filterp.
 * Returns 0, a parse error code, or -ENOMEM.
 */
static int create_filter_start(char *filter_str, bool set_str,
                               struct filter_parse_state **psp,
                               struct event_filter **filterp)
{
        struct event_filter *filter;
        struct filter_parse_state *ps = NULL;
        int err = 0;

        WARN_ON_ONCE(*psp || *filterp);

        /* allocate everything, and if any fails, free all and fail */
        filter = __alloc_filter();
        if (filter && set_str)
                err = replace_filter_string(filter, filter_str);

        ps = kzalloc(sizeof(*ps), GFP_KERNEL);

        if (!filter || !ps || err) {
                /* kfree(NULL)/__free_filter(NULL) are safe no-ops */
                kfree(ps);
                __free_filter(filter);
                return -ENOMEM;
        }

        /* we're committed to creating a new filter */
        *filterp = filter;
        *psp = ps;

        parse_init(ps, filter_ops, filter_str);
        err = filter_parse(ps);
        if (err && set_str)
                append_filter_err(ps, filter);
        return err;
}
/* Tear down the parse state created by create_filter_start(), if any. */
static void create_filter_finish(struct filter_parse_state *ps)
{
        if (!ps)
                return;

        filter_opstack_clear(ps);
        postfix_clear(ps);
        kfree(ps);
}
/**
 * create_filter - create a filter for a ftrace_event_call
 * @call: ftrace_event_call to create a filter for
 * @filter_str: filter string
 * @set_str: remember @filter_str and enable detailed error in filter
 * @filterp: out param for created filter (always updated on return)
 *
 * Creates a filter for @call with @filter_str. If @set_str is %true,
 * @filter_str is copied and recorded in the new filter.
 *
 * On success, returns 0 and *@filterp points to the new filter. On
 * failure, returns -errno and *@filterp may point to %NULL or to a new
 * filter. In the latter case, the returned filter contains error
 * information if @set_str is %true and the caller is responsible for
 * freeing it.
 */
static int create_filter(struct ftrace_event_call *call,
                         char *filter_str, bool set_str,
                         struct event_filter **filterp)
{
        struct event_filter *filter = NULL;
        struct filter_parse_state *ps = NULL;
        int err;

        err = create_filter_start(filter_str, set_str, &ps, &filter);
        if (!err) {
                /* Parsing succeeded; build the real predicate tree. */
                err = replace_preds(call, filter, ps, filter_str, false);
                if (err && set_str)
                        append_filter_err(ps, filter);
        }
        create_filter_finish(ps);

        *filterp = filter;
        return err;
}
/**
 * create_system_filter - create a filter for an event_subsystem
 * @system: event_subsystem to create a filter for
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it creates a subsystem filter
 * and always remembers @filter_str.
 */
static int create_system_filter(struct event_subsystem *system,
                                char *filter_str, struct event_filter **filterp)
{
        struct event_filter *filter = NULL;
        struct filter_parse_state *ps = NULL;
        int err;

        err = create_filter_start(filter_str, true, &ps, &filter);
        if (!err) {
                err = replace_system_preds(system, ps, filter_str);
                if (!err) {
                        /* System filters just show a default message */
                        kfree(filter->filter_string);
                        filter->filter_string = NULL;
                } else {
                        append_filter_err(ps, filter);
                }
        }
        create_filter_finish(ps);

        *filterp = filter;
        return err;
}
/*
 * Set, replace, or clear (filter string "0") the filter of one event.
 * Serialized by event_mutex; old filters are freed only after a
 * synchronize_sched() so in-flight readers are safe.
 * Returns 0 or a negative errno.
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
        struct event_filter *filter;
        int err = 0;

        mutex_lock(&event_mutex);

        /* Writing "0" clears the filter entirely. */
        if (!strcmp(strstrip(filter_string), "0")) {
                filter_disable(call);
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
                RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
                goto out_unlock;
        }

        err = create_filter(call, filter_string, true, &filter);

        /*
         * Always swap the call filter with the new filter
         * even if there was an error. If there was an error
         * in the filter, we disable the filter and show the error
         * string
         */
        if (filter) {
                struct event_filter *tmp = call->filter;

                if (!err)
                        call->flags |= TRACE_EVENT_FL_FILTERED;
                else
                        filter_disable(call);

                rcu_assign_pointer(call->filter, filter);

                if (tmp) {
                        /* Make sure the call is done with the filter */
                        synchronize_sched();
                        __free_filter(tmp);
                }
        }
out_unlock:
        mutex_unlock(&event_mutex);

        return err;
}
/*
 * Set or clear (filter string "0") the shared filter of every event in
 * @system. Serialized by event_mutex. Returns 0 or a negative errno.
 */
int apply_subsystem_event_filter(struct event_subsystem *system,
                                 char *filter_string)
{
        struct event_filter *filter;
        int err = 0;

        mutex_lock(&event_mutex);

        /* Make sure the system still has events */
        if (!system->nr_events) {
                err = -ENODEV;
                goto out_unlock;
        }

        /* Writing "0" clears the subsystem filter and every per-event copy. */
        if (!strcmp(strstrip(filter_string), "0")) {
                filter_free_subsystem_preds(system);
                remove_filter_string(system->filter);
                filter = system->filter;
                system->filter = NULL;
                /* Ensure all filters are no longer used */
                synchronize_sched();
                filter_free_subsystem_filters(system);
                __free_filter(filter);
                goto out_unlock;
        }

        err = create_system_filter(system, filter_string, &filter);
        if (filter) {
                /*
                 * No event actually uses the system filter
                 * we can free it without synchronize_sched().
                 */
                __free_filter(system->filter);
                system->filter = filter;
        }
out_unlock:
        mutex_unlock(&event_mutex);

        return err;
}
#ifdef CONFIG_PERF_EVENTS

/* Detach and free the filter attached to a perf event, if any. */
void ftrace_profile_free_filter(struct perf_event *event)
{
        struct event_filter *filter = event->filter;

        event->filter = NULL;
        __free_filter(filter);  /* safe on NULL */
}
/* Per-event state while translating an "ip" filter into ftrace calls. */
struct function_filter_data {
        struct ftrace_ops *ops;
        int first_filter;       /* first set_filter call should reset the hash */
        int first_notrace;      /* first set_notrace call should reset the hash */
};
1967 #ifdef CONFIG_FUNCTION_TRACER
1968 static char **
1969 ftrace_function_filter_re(char *buf, int len, int *count)
1971 char *str, *sep, **re;
1973 str = kstrndup(buf, len, GFP_KERNEL);
1974 if (!str)
1975 return NULL;
1978 * The argv_split function takes white space
1979 * as a separator, so convert ',' into spaces.
1981 while ((sep = strchr(str, ',')))
1982 *sep = ' ';
1984 re = argv_split(GFP_KERNEL, str, count);
1985 kfree(str);
1986 return re;
/* Apply one regexp to either the filter or the notrace hash of @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
                                      int reset, char *re, int len)
{
        return filter ? ftrace_set_filter(ops, re, len, reset)
                      : ftrace_set_notrace(ops, re, len, reset);
}
2002 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2003 struct function_filter_data *data)
2005 int i, re_cnt, ret;
2006 int *reset;
2007 char **re;
2009 reset = filter ? &data->first_filter : &data->first_notrace;
2012 * The 'ip' field could have multiple filters set, separated
2013 * either by space or comma. We first cut the filter and apply
2014 * all pieces separatelly.
2016 re = ftrace_function_filter_re(buf, len, &re_cnt);
2017 if (!re)
2018 return -EINVAL;
2020 for (i = 0; i < re_cnt; i++) {
2021 ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2022 re[i], strlen(re[i]));
2023 if (ret)
2024 break;
2026 if (*reset)
2027 *reset = 0;
2030 argv_free(re);
2031 return ret;
2034 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2036 struct ftrace_event_field *field = pred->field;
2038 if (leaf) {
2040 * Check the leaf predicate for function trace, verify:
2041 * - only '==' and '!=' is used
2042 * - the 'ip' field is used
2044 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2045 return -EINVAL;
2047 if (strcmp(field->name, "ip"))
2048 return -EINVAL;
2049 } else {
2051 * Check the non leaf predicate for function trace, verify:
2052 * - only '||' is used
2054 if (pred->op != OP_OR)
2055 return -EINVAL;
2058 return 0;
/*
 * Walk callback: validate each node for function tracing and, for valid
 * leaves, apply the leaf's regex as an ftrace filter ('==') or notrace
 * ('!=') pattern. Aborts the walk on the first error.
 */
static int ftrace_function_set_filter_cb(enum move_type move,
                                         struct filter_pred *pred,
                                         int *err, void *data)
{
        /* Checking the node is valid for function trace. */
        if ((move != MOVE_DOWN) ||
            (pred->left != FILTER_PRED_INVALID)) {
                /* interior node (or a revisit on the way up) */
                *err = ftrace_function_check_pred(pred, 0);
        } else {
                /* leaf: validate, then apply its pattern */
                *err = ftrace_function_check_pred(pred, 1);
                if (*err)
                        return WALK_PRED_ABORT;

                *err = __ftrace_function_set_filter(pred->op == OP_EQ,
                                                    pred->regex.pattern,
                                                    pred->regex.len,
                                                    data);
        }

        return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
/*
 * Translate an event filter on the "ip" field into ftrace function
 * filter/notrace settings on the event's ftrace_ops.
 */
static int ftrace_function_set_filter(struct perf_event *event,
                                      struct event_filter *filter)
{
        struct function_filter_data data = {
                .first_filter = 1,
                .first_notrace = 1,
                .ops = &event->ftrace_ops,
        };

        return walk_pred_tree(filter->preds, filter->root,
                              ftrace_function_set_filter_cb, &data);
}
#else
/* Without the function tracer there is nothing to attach a filter to. */
static int ftrace_function_set_filter(struct perf_event *event,
                                      struct event_filter *filter)
{
        return -ENODEV;
}
#endif /* CONFIG_FUNCTION_TRACER */
/*
 * Attach a filter to a perf event. For function events the filter is
 * translated into ftrace filters and then freed; otherwise it is kept
 * on the event until ftrace_profile_free_filter().
 * Returns 0 or a negative errno (-EEXIST if a filter is already set).
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                              char *filter_str)
{
        int err;
        struct event_filter *filter;
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);

        call = event->tp_event;

        err = -EINVAL;
        if (!call)
                goto out_unlock;

        err = -EEXIST;
        if (event->filter)
                goto out_unlock;

        err = create_filter(call, filter_str, false, &filter);
        if (err)
                goto free_filter;

        if (ftrace_event_is_function(call))
                err = ftrace_function_set_filter(event, filter);
        else
                event->filter = filter;

free_filter:
        /* function events keep their own state; don't retain our copy */
        if (err || ftrace_event_is_function(call))
                __free_filter(filter);

out_unlock:
        mutex_unlock(&event_mutex);

        return err;
}

#endif /* CONFIG_PERF_EVENTS */
2143 #ifdef CONFIG_FTRACE_STARTUP_TEST
2145 #include <linux/types.h>
2146 #include <linux/tracepoint.h>
2148 #define CREATE_TRACE_POINTS
2149 #include "trace_events_filter_test.h"
/*
 * DATA_REC - build one self-test case: @m is the expected match result
 * for record fields @va..@vh under the current FILTER string; @nvisit
 * lists the fields whose leaf predicates must NOT be evaluated (this
 * proves the folded tree short-circuits correctly).
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO  0

static struct test_filter_data_t {
	char *filter;                                   /* filter expression under test */
	struct ftrace_raw_ftrace_test_filter rec;       /* sample record to match */
	int match;                                      /* expected filter_match_preds() result */
	char *not_visited;                              /* field letters that must be skipped */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* Number of self-test cases in the fixture table. */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

/* Set by test_pred_visited_fn() when a supposedly-skipped predicate runs. */
static int test_pred_visited;
/* Replacement pred->fn: records that a "must not visit" predicate ran. */
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
        struct ftrace_event_field *field = pred->field;

        test_pred_visited = 1;
        printk(KERN_INFO "\npred visited %s\n", field->name);
        return 1;
}
/*
 * Walk callback: for every leaf whose field letter appears in the
 * "not visited" string (@data), swap in test_pred_visited_fn so any
 * visit during the subsequent match is detected.
 */
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
                             int *err, void *data)
{
        char *fields = data;

        if ((move == MOVE_DOWN) &&
            (pred->left == FILTER_PRED_INVALID)) {
                struct ftrace_event_field *field = pred->field;

                if (!field) {
                        WARN(1, "all leafs should have field defined");
                        return WALK_PRED_DEFAULT;
                }
                /* Field names are single letters in the test fixture. */
                if (!strchr(fields, *field->name))
                        return WALK_PRED_DEFAULT;

                WARN_ON(!pred->fn);
                pred->fn = test_pred_visited_fn;
        }
        return WALK_PRED_DEFAULT;
}
/*
 * Boot-time self-test: for each fixture, build the filter, instrument
 * the predicates that must be short-circuited, run the match, and
 * verify both the result and that no skipped predicate was evaluated.
 */
static __init int ftrace_test_event_filter(void)
{
        int i;

        printk(KERN_INFO "Testing ftrace filter: ");

        for (i = 0; i < DATA_CNT; i++) {
                struct event_filter *filter = NULL;
                struct test_filter_data_t *d = &test_filter_data[i];
                int err;

                err = create_filter(&event_ftrace_test_filter, d->filter,
                                    false, &filter);
                if (err) {
                        printk(KERN_INFO
                               "Failed to get filter for '%s', err %d\n",
                               d->filter, err);
                        __free_filter(filter);
                        break;
                }

                /*
                 * The preemption disabling is not really needed for self
                 * tests, but the rcu dereference will complain without it.
                 */
                preempt_disable();
                if (*d->not_visited)
                        walk_pred_tree(filter->preds, filter->root,
                                       test_walk_pred_cb,
                                       d->not_visited);

                test_pred_visited = 0;
                err = filter_match_preds(filter, &d->rec);
                preempt_enable();

                __free_filter(filter);

                if (test_pred_visited) {
                        printk(KERN_INFO
                               "Failed, unwanted pred visited for filter %s\n",
                               d->filter);
                        break;
                }

                if (err != d->match) {
                        printk(KERN_INFO
                               "Failed to match filter '%s', expected %d\n",
                               d->filter, d->match);
                        break;
                }
        }

        /* Loop ran to completion only if every case passed. */
        if (i == DATA_CNT)
                printk(KERN_CONT "OK\n");

        return 0;
}

late_initcall(ftrace_test_event_filter);
2318 #endif /* CONFIG_FTRACE_STARTUP_TEST */