kernel/trace/trace_uprobe.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
10 #include <linux/security.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/uprobes.h>
15 #include <linux/namei.h>
16 #include <linux/string.h>
17 #include <linux/rculist.h>
19 #include "trace_dynevent.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
23 #define UPROBE_EVENT_SYSTEM "uprobes"
25 struct uprobe_trace_entry_head {
26 struct trace_entry ent;
27 unsigned long vaddr[];
30 #define SIZEOF_TRACE_ENTRY(is_return) \
31 (sizeof(struct uprobe_trace_entry_head) + \
32 sizeof(unsigned long) * (is_return ? 2 : 1))
34 #define DATAOF_TRACE_ENTRY(entry, is_return) \
35 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
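/*
 * Layout of a recorded entry: a return probe carries two addresses in the
 * header (vaddr[0] = entry address of the probed function, vaddr[1] = the
 * address being returned to), an ordinary probe only the instruction
 * pointer in vaddr[0].  The fetched probe arguments follow immediately
 * after the header, which is what DATAOF_TRACE_ENTRY() points at (see
 * __uprobe_trace_func() and uprobe_event_define_fields() below).
 */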
37 struct trace_uprobe_filter {
38 rwlock_t rwlock;
39 int nr_systemwide;
40 struct list_head perf_events;
43 static int trace_uprobe_create(int argc, const char **argv);
44 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
45 static int trace_uprobe_release(struct dyn_event *ev);
46 static bool trace_uprobe_is_busy(struct dyn_event *ev);
47 static bool trace_uprobe_match(const char *system, const char *event,
48 int argc, const char **argv, struct dyn_event *ev);
50 static struct dyn_event_operations trace_uprobe_ops = {
51 .create = trace_uprobe_create,
52 .show = trace_uprobe_show,
53 .is_busy = trace_uprobe_is_busy,
54 .free = trace_uprobe_release,
55 .match = trace_uprobe_match,
59 * uprobe event core functions
61 struct trace_uprobe {
62 struct dyn_event devent;
63 struct trace_uprobe_filter filter;
64 struct uprobe_consumer consumer;
65 struct path path;
66 struct inode *inode;
67 char *filename;
68 unsigned long offset;
69 unsigned long ref_ctr_offset;
70 unsigned long nhit;
71 struct trace_probe tp;
74 static bool is_trace_uprobe(struct dyn_event *ev)
76 return ev->ops == &trace_uprobe_ops;
79 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
81 return container_of(ev, struct trace_uprobe, devent);
84 /**
85 * for_each_trace_uprobe - iterate over the trace_uprobe list
86 * @pos: the struct trace_uprobe * for each entry
87 * @dpos: the struct dyn_event * to use as a loop cursor
89 #define for_each_trace_uprobe(pos, dpos) \
90 for_each_dyn_event(dpos) \
91 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
93 #define SIZEOF_TRACE_UPROBE(n) \
94 (offsetof(struct trace_uprobe, tp.args) + \
95 (sizeof(struct probe_arg) * (n)))
97 static int register_uprobe_event(struct trace_uprobe *tu);
98 static int unregister_uprobe_event(struct trace_uprobe *tu);
100 struct uprobe_dispatch_data {
101 struct trace_uprobe *tu;
102 unsigned long bp_addr;
105 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
106 static int uretprobe_dispatcher(struct uprobe_consumer *con,
107 unsigned long func, struct pt_regs *regs);
109 #ifdef CONFIG_STACK_GROWSUP
110 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
112 return addr - (n * sizeof(long));
114 #else
115 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
117 return addr + (n * sizeof(long));
119 #endif
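/*
 * Read the n-th word of the probed task's user stack, stepping in the
 * direction the stack grows on this architecture (see adjust_stack_addr()
 * above).  A faulting access simply yields 0 rather than an error.
 */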
121 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
123 unsigned long ret;
124 unsigned long addr = user_stack_pointer(regs);
126 addr = adjust_stack_addr(addr, n);
128 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
129 return 0;
131 return ret;
135 * Uprobes-specific fetch functions
137 static nokprobe_inline int
138 probe_mem_read(void *dest, void *src, size_t size)
140 void __user *vaddr = (void __force __user *)src;
142 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
145 static nokprobe_inline int
146 probe_mem_read_user(void *dest, void *src, size_t size)
148 return probe_mem_read(dest, src, size);
152 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
153 * length and relative data location.
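/*
 * The "data location" is the same packed u32 used for ftrace __data_loc
 * fields: conventionally the maximum length in the upper 16 bits and the
 * offset from @base in the lower 16 (see make_data_loc() in trace_probe.h).
 */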
155 static nokprobe_inline int
156 fetch_store_string(unsigned long addr, void *dest, void *base)
158 long ret;
159 u32 loc = *(u32 *)dest;
160 int maxlen = get_loc_len(loc);
161 u8 *dst = get_loc_data(dest, base);
162 void __user *src = (void __force __user *) addr;
164 if (unlikely(!maxlen))
165 return -ENOMEM;
167 if (addr == FETCH_TOKEN_COMM)
168 ret = strlcpy(dst, current->comm, maxlen);
169 else
170 ret = strncpy_from_user(dst, src, maxlen);
171 if (ret >= 0) {
172 if (ret == maxlen)
173 dst[ret - 1] = '\0';
174 else
176 * Include the terminating null byte. In this case it
177 * was copied by strncpy_from_user but not accounted
178 * for in ret.
180 ret++;
181 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
184 return ret;
187 static nokprobe_inline int
188 fetch_store_string_user(unsigned long addr, void *dest, void *base)
190 return fetch_store_string(addr, dest, base);
193 /* Return the length of the string -- including the terminating null byte */
194 static nokprobe_inline int
195 fetch_store_strlen(unsigned long addr)
197 int len;
198 void __user *vaddr = (void __force __user *) addr;
200 if (addr == FETCH_TOKEN_COMM)
201 len = strlen(current->comm) + 1;
202 else
203 len = strnlen_user(vaddr, MAX_STRING_SIZE);
205 return (len > MAX_STRING_SIZE) ? 0 : len;
208 static nokprobe_inline int
209 fetch_store_strlen_user(unsigned long addr)
211 return fetch_store_strlen(addr);
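/*
 * FETCH_OP_FOFFS arguments are given as offsets into the probed file.
 * At probe-hit time the dispatchers below stash a uprobe_dispatch_data in
 * current->utask->vaddr; the load base of the mapping is recovered by
 * subtracting the probe's file offset from the probed address, and the
 * requested file offset is then translated to a virtual address in the
 * current task.
 */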
214 static unsigned long translate_user_vaddr(unsigned long file_offset)
216 unsigned long base_addr;
217 struct uprobe_dispatch_data *udd;
219 udd = (void *) current->utask->vaddr;
221 base_addr = udd->bp_addr - udd->tu->offset;
222 return base_addr + file_offset;
225 /* Note that we don't verify it, since the code does not come from user space */
226 static int
227 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
228 void *base)
230 unsigned long val;
232 /* 1st stage: get value from context */
233 switch (code->op) {
234 case FETCH_OP_REG:
235 val = regs_get_register(regs, code->param);
236 break;
237 case FETCH_OP_STACK:
238 val = get_user_stack_nth(regs, code->param);
239 break;
240 case FETCH_OP_STACKP:
241 val = user_stack_pointer(regs);
242 break;
243 case FETCH_OP_RETVAL:
244 val = regs_return_value(regs);
245 break;
246 case FETCH_OP_IMM:
247 val = code->immediate;
248 break;
249 case FETCH_OP_COMM:
250 val = FETCH_TOKEN_COMM;
251 break;
252 case FETCH_OP_DATA:
253 val = (unsigned long)code->data;
254 break;
255 case FETCH_OP_FOFFS:
256 val = translate_user_vaddr(code->immediate);
257 break;
258 default:
259 return -EILSEQ;
261 code++;
263 return process_fetch_insn_bottom(code, val, dest, base);
265 NOKPROBE_SYMBOL(process_fetch_insn)
267 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
269 rwlock_init(&filter->rwlock);
270 filter->nr_systemwide = 0;
271 INIT_LIST_HEAD(&filter->perf_events);
274 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
276 return !filter->nr_systemwide && list_empty(&filter->perf_events);
279 static inline bool is_ret_probe(struct trace_uprobe *tu)
281 return tu->consumer.ret_handler != NULL;
284 static bool trace_uprobe_is_busy(struct dyn_event *ev)
286 struct trace_uprobe *tu = to_trace_uprobe(ev);
288 return trace_probe_is_enabled(&tu->tp);
291 static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
292 int argc, const char **argv)
294 char buf[MAX_ARGSTR_LEN + 1];
295 int len;
297 if (!argc)
298 return true;
300 len = strlen(tu->filename);
301 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
302 return false;
304 if (tu->ref_ctr_offset == 0)
305 snprintf(buf, sizeof(buf), "0x%0*lx",
306 (int)(sizeof(void *) * 2), tu->offset);
307 else
308 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
309 (int)(sizeof(void *) * 2), tu->offset,
310 tu->ref_ctr_offset);
311 if (strcmp(buf, &argv[0][len + 1]))
312 return false;
314 argc--; argv++;
316 return trace_probe_match_command_args(&tu->tp, argc, argv);
319 static bool trace_uprobe_match(const char *system, const char *event,
320 int argc, const char **argv, struct dyn_event *ev)
322 struct trace_uprobe *tu = to_trace_uprobe(ev);
324 return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
325 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
326 trace_uprobe_match_command_head(tu, argc, argv);
329 static nokprobe_inline struct trace_uprobe *
330 trace_uprobe_primary_from_call(struct trace_event_call *call)
332 struct trace_probe *tp;
334 tp = trace_probe_primary_from_call(call);
335 if (WARN_ON_ONCE(!tp))
336 return NULL;
338 return container_of(tp, struct trace_uprobe, tp);
342 * Allocate new trace_uprobe and initialize it (including uprobes).
344 static struct trace_uprobe *
345 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
347 struct trace_uprobe *tu;
348 int ret;
350 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
351 if (!tu)
352 return ERR_PTR(-ENOMEM);
354 ret = trace_probe_init(&tu->tp, event, group);
355 if (ret < 0)
356 goto error;
358 dyn_event_init(&tu->devent, &trace_uprobe_ops);
359 tu->consumer.handler = uprobe_dispatcher;
360 if (is_ret)
361 tu->consumer.ret_handler = uretprobe_dispatcher;
362 init_trace_uprobe_filter(&tu->filter);
363 return tu;
365 error:
366 kfree(tu);
368 return ERR_PTR(ret);
371 static void free_trace_uprobe(struct trace_uprobe *tu)
373 if (!tu)
374 return;
376 path_put(&tu->path);
377 trace_probe_cleanup(&tu->tp);
378 kfree(tu->filename);
379 kfree(tu);
382 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
384 struct dyn_event *pos;
385 struct trace_uprobe *tu;
387 for_each_trace_uprobe(tu, pos)
388 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
389 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
390 return tu;
392 return NULL;
395 /* Unregister a trace_uprobe and probe_event */
396 static int unregister_trace_uprobe(struct trace_uprobe *tu)
398 int ret;
400 if (trace_probe_has_sibling(&tu->tp))
401 goto unreg;
403 ret = unregister_uprobe_event(tu);
404 if (ret)
405 return ret;
407 unreg:
408 dyn_event_remove(&tu->devent);
409 trace_probe_unlink(&tu->tp);
410 free_trace_uprobe(tu);
411 return 0;
414 static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
415 struct trace_uprobe *comp)
417 struct trace_probe_event *tpe = orig->tp.event;
418 struct trace_probe *pos;
419 struct inode *comp_inode = d_real_inode(comp->path.dentry);
420 int i;
422 list_for_each_entry(pos, &tpe->probes, list) {
423 orig = container_of(pos, struct trace_uprobe, tp);
424 if (comp_inode != d_real_inode(orig->path.dentry) ||
425 comp->offset != orig->offset)
426 continue;
429 * trace_probe_compare_arg_type() ensured that nr_args and each
430 * argument's name and type are the same. Let's compare comm.
432 for (i = 0; i < orig->tp.nr_args; i++) {
433 if (strcmp(orig->tp.args[i].comm,
434 comp->tp.args[i].comm))
435 break;
438 if (i == orig->tp.nr_args)
439 return true;
442 return false;
445 static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
447 int ret;
449 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
450 if (ret) {
451 /* Note that arguments start at index 2 */
452 trace_probe_log_set_index(ret + 1);
453 trace_probe_log_err(0, DIFF_ARG_TYPE);
454 return -EEXIST;
456 if (trace_uprobe_has_same_uprobe(to, tu)) {
457 trace_probe_log_set_index(0);
458 trace_probe_log_err(0, SAME_PROBE);
459 return -EEXIST;
462 /* Append to existing event */
463 ret = trace_probe_append(&tu->tp, &to->tp);
464 if (!ret)
465 dyn_event_add(&tu->devent);
467 return ret;
471 * A uprobe with multiple reference counters is not allowed, i.e.
472 * if inode and offset match, the reference counter offset *must*
473 * match as well. There is one exception, though: if the user is
474 * replacing an old trace_uprobe with a new one (same group/event),
475 * then we allow the same uprobe with a new reference counter as
476 * long as the new one does not conflict with any other existing
477 * ones.
479 static int validate_ref_ctr_offset(struct trace_uprobe *new)
481 struct dyn_event *pos;
482 struct trace_uprobe *tmp;
483 struct inode *new_inode = d_real_inode(new->path.dentry);
485 for_each_trace_uprobe(tmp, pos) {
486 if (new_inode == d_real_inode(tmp->path.dentry) &&
487 new->offset == tmp->offset &&
488 new->ref_ctr_offset != tmp->ref_ctr_offset) {
489 pr_warn("Reference counter offset mismatch.\n");
490 return -EINVAL;
493 return 0;
496 /* Register a trace_uprobe and probe_event */
497 static int register_trace_uprobe(struct trace_uprobe *tu)
499 struct trace_uprobe *old_tu;
500 int ret;
502 mutex_lock(&event_mutex);
504 ret = validate_ref_ctr_offset(tu);
505 if (ret)
506 goto end;
508 /* register as an event */
509 old_tu = find_probe_event(trace_probe_name(&tu->tp),
510 trace_probe_group_name(&tu->tp));
511 if (old_tu) {
512 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
513 trace_probe_log_set_index(0);
514 trace_probe_log_err(0, DIFF_PROBE_TYPE);
515 ret = -EEXIST;
516 } else {
517 ret = append_trace_uprobe(tu, old_tu);
519 goto end;
522 ret = register_uprobe_event(tu);
523 if (ret) {
524 pr_warn("Failed to register probe event(%d)\n", ret);
525 goto end;
528 dyn_event_add(&tu->devent);
530 end:
531 mutex_unlock(&event_mutex);
533 return ret;
537 * Argument syntax:
538 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
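/*
 * For example (path, event names and offset purely illustrative), written
 * to the uprobe_events file created in init_uprobe_trace():
 *
 *   p:myprobe /bin/bash:0x4245c0 %ip
 *   r:myretprobe /bin/bash:0x4245c0 $retval
 *
 * FETCHARGS use the common trace_probe syntax (registers, $stack, $comm,
 * memory dereferences, ...) and are parsed by traceprobe_parse_probe_arg().
 */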
540 static int trace_uprobe_create(int argc, const char **argv)
542 struct trace_uprobe *tu;
543 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
544 char *arg, *filename, *rctr, *rctr_end, *tmp;
545 char buf[MAX_EVENT_NAME_LEN];
546 struct path path;
547 unsigned long offset, ref_ctr_offset;
548 bool is_return = false;
549 int i, ret;
551 ret = 0;
552 ref_ctr_offset = 0;
554 switch (argv[0][0]) {
555 case 'r':
556 is_return = true;
557 break;
558 case 'p':
559 break;
560 default:
561 return -ECANCELED;
564 if (argc < 2)
565 return -ECANCELED;
567 if (argv[0][1] == ':')
568 event = &argv[0][2];
570 if (!strchr(argv[1], '/'))
571 return -ECANCELED;
573 filename = kstrdup(argv[1], GFP_KERNEL);
574 if (!filename)
575 return -ENOMEM;
577 /* Find the last occurrence, in case the path contains ':' too. */
578 arg = strrchr(filename, ':');
579 if (!arg || !isdigit(arg[1])) {
580 kfree(filename);
581 return -ECANCELED;
584 trace_probe_log_init("trace_uprobe", argc, argv);
585 trace_probe_log_set_index(1); /* filename is the 2nd argument */
587 *arg++ = '\0';
588 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
589 if (ret) {
590 trace_probe_log_err(0, FILE_NOT_FOUND);
591 kfree(filename);
592 trace_probe_log_clear();
593 return ret;
595 if (!d_is_reg(path.dentry)) {
596 trace_probe_log_err(0, NO_REGULAR_FILE);
597 ret = -EINVAL;
598 goto fail_address_parse;
601 /* Parse reference counter offset if specified. */
602 rctr = strchr(arg, '(');
603 if (rctr) {
604 rctr_end = strchr(rctr, ')');
605 if (!rctr_end) {
606 ret = -EINVAL;
607 rctr_end = rctr + strlen(rctr);
608 trace_probe_log_err(rctr_end - filename,
609 REFCNT_OPEN_BRACE);
610 goto fail_address_parse;
611 } else if (rctr_end[1] != '\0') {
612 ret = -EINVAL;
613 trace_probe_log_err(rctr_end + 1 - filename,
614 BAD_REFCNT_SUFFIX);
615 goto fail_address_parse;
618 *rctr++ = '\0';
619 *rctr_end = '\0';
620 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
621 if (ret) {
622 trace_probe_log_err(rctr - filename, BAD_REFCNT);
623 goto fail_address_parse;
627 /* Parse uprobe offset. */
628 ret = kstrtoul(arg, 0, &offset);
629 if (ret) {
630 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
631 goto fail_address_parse;
634 /* setup a probe */
635 trace_probe_log_set_index(0);
636 if (event) {
637 ret = traceprobe_parse_event_name(&event, &group, buf,
638 event - argv[0]);
639 if (ret)
640 goto fail_address_parse;
641 } else {
642 char *tail;
643 char *ptr;
645 tail = kstrdup(kbasename(filename), GFP_KERNEL);
646 if (!tail) {
647 ret = -ENOMEM;
648 goto fail_address_parse;
651 ptr = strpbrk(tail, ".-_");
652 if (ptr)
653 *ptr = '\0';
655 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
656 event = buf;
657 kfree(tail);
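/*
 * Without an explicit name the event defaults to p_<basename>_0x<offset>,
 * with the basename truncated at the first '.', '-' or '_', e.g.
 * "p_bash_0x4245c0" (illustrative values).
 */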
660 argc -= 2;
661 argv += 2;
663 tu = alloc_trace_uprobe(group, event, argc, is_return);
664 if (IS_ERR(tu)) {
665 ret = PTR_ERR(tu);
666 /* This must return -ENOMEM otherwise there is a bug */
667 WARN_ON_ONCE(ret != -ENOMEM);
668 goto fail_address_parse;
670 tu->offset = offset;
671 tu->ref_ctr_offset = ref_ctr_offset;
672 tu->path = path;
673 tu->filename = filename;
675 /* parse arguments */
676 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
677 tmp = kstrdup(argv[i], GFP_KERNEL);
678 if (!tmp) {
679 ret = -ENOMEM;
680 goto error;
683 trace_probe_log_set_index(i + 2);
684 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
685 is_return ? TPARG_FL_RETURN : 0);
686 kfree(tmp);
687 if (ret)
688 goto error;
691 ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
692 if (ret < 0)
693 goto error;
695 ret = register_trace_uprobe(tu);
696 if (!ret)
697 goto out;
699 error:
700 free_trace_uprobe(tu);
701 out:
702 trace_probe_log_clear();
703 return ret;
705 fail_address_parse:
706 trace_probe_log_clear();
707 path_put(&path);
708 kfree(filename);
710 return ret;
713 static int create_or_delete_trace_uprobe(int argc, char **argv)
715 int ret;
717 if (argv[0][0] == '-')
718 return dyn_event_release(argc, argv, &trace_uprobe_ops);
720 ret = trace_uprobe_create(argc, (const char **)argv);
721 return ret == -ECANCELED ? -EINVAL : ret;
724 static int trace_uprobe_release(struct dyn_event *ev)
726 struct trace_uprobe *tu = to_trace_uprobe(ev);
728 return unregister_trace_uprobe(tu);
731 /* Probes listing interfaces */
732 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
734 struct trace_uprobe *tu = to_trace_uprobe(ev);
735 char c = is_ret_probe(tu) ? 'r' : 'p';
736 int i;
738 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
739 trace_probe_name(&tu->tp), tu->filename,
740 (int)(sizeof(void *) * 2), tu->offset);
742 if (tu->ref_ctr_offset)
743 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
745 for (i = 0; i < tu->tp.nr_args; i++)
746 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
748 seq_putc(m, '\n');
749 return 0;
752 static int probes_seq_show(struct seq_file *m, void *v)
754 struct dyn_event *ev = v;
756 if (!is_trace_uprobe(ev))
757 return 0;
759 return trace_uprobe_show(m, ev);
762 static const struct seq_operations probes_seq_op = {
763 .start = dyn_event_seq_start,
764 .next = dyn_event_seq_next,
765 .stop = dyn_event_seq_stop,
766 .show = probes_seq_show
769 static int probes_open(struct inode *inode, struct file *file)
771 int ret;
773 ret = security_locked_down(LOCKDOWN_TRACEFS);
774 if (ret)
775 return ret;
777 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
778 ret = dyn_events_release_all(&trace_uprobe_ops);
779 if (ret)
780 return ret;
783 return seq_open(file, &probes_seq_op);
786 static ssize_t probes_write(struct file *file, const char __user *buffer,
787 size_t count, loff_t *ppos)
789 return trace_parse_run_command(file, buffer, count, ppos,
790 create_or_delete_trace_uprobe);
793 static const struct file_operations uprobe_events_ops = {
794 .owner = THIS_MODULE,
795 .open = probes_open,
796 .read = seq_read,
797 .llseek = seq_lseek,
798 .release = seq_release,
799 .write = probes_write,
802 /* Probes profiling interfaces */
803 static int probes_profile_seq_show(struct seq_file *m, void *v)
805 struct dyn_event *ev = v;
806 struct trace_uprobe *tu;
808 if (!is_trace_uprobe(ev))
809 return 0;
811 tu = to_trace_uprobe(ev);
812 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
813 trace_probe_name(&tu->tp), tu->nhit);
814 return 0;
817 static const struct seq_operations profile_seq_op = {
818 .start = dyn_event_seq_start,
819 .next = dyn_event_seq_next,
820 .stop = dyn_event_seq_stop,
821 .show = probes_profile_seq_show
824 static int profile_open(struct inode *inode, struct file *file)
826 int ret;
828 ret = security_locked_down(LOCKDOWN_TRACEFS);
829 if (ret)
830 return ret;
832 return seq_open(file, &profile_seq_op);
835 static const struct file_operations uprobe_profile_ops = {
836 .owner = THIS_MODULE,
837 .open = profile_open,
838 .read = seq_read,
839 .llseek = seq_lseek,
840 .release = seq_release,
843 struct uprobe_cpu_buffer {
844 struct mutex mutex;
845 void *buf;
847 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
848 static int uprobe_buffer_refcnt;
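/*
 * One page of scratch space per CPU, used to assemble the fetched
 * arguments before they are copied into the ring buffer or perf buffer.
 * The buffers are allocated on first enable and freed on last disable,
 * refcounted under event_mutex.
 */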
850 static int uprobe_buffer_init(void)
852 int cpu, err_cpu;
854 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
855 if (uprobe_cpu_buffer == NULL)
856 return -ENOMEM;
858 for_each_possible_cpu(cpu) {
859 struct page *p = alloc_pages_node(cpu_to_node(cpu),
860 GFP_KERNEL, 0);
861 if (p == NULL) {
862 err_cpu = cpu;
863 goto err;
865 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
866 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
869 return 0;
871 err:
872 for_each_possible_cpu(cpu) {
873 if (cpu == err_cpu)
874 break;
875 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
878 free_percpu(uprobe_cpu_buffer);
879 return -ENOMEM;
882 static int uprobe_buffer_enable(void)
884 int ret = 0;
886 BUG_ON(!mutex_is_locked(&event_mutex));
888 if (uprobe_buffer_refcnt++ == 0) {
889 ret = uprobe_buffer_init();
890 if (ret < 0)
891 uprobe_buffer_refcnt--;
894 return ret;
897 static void uprobe_buffer_disable(void)
899 int cpu;
901 BUG_ON(!mutex_is_locked(&event_mutex));
903 if (--uprobe_buffer_refcnt == 0) {
904 for_each_possible_cpu(cpu)
905 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
906 cpu)->buf);
908 free_percpu(uprobe_cpu_buffer);
909 uprobe_cpu_buffer = NULL;
913 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
915 struct uprobe_cpu_buffer *ucb;
916 int cpu;
918 cpu = raw_smp_processor_id();
919 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
922 * Use per-cpu buffers for fastest access, but we might migrate
923 * so the mutex makes sure we have sole access to it.
925 mutex_lock(&ucb->mutex);
927 return ucb;
930 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
932 mutex_unlock(&ucb->mutex);
935 static void __uprobe_trace_func(struct trace_uprobe *tu,
936 unsigned long func, struct pt_regs *regs,
937 struct uprobe_cpu_buffer *ucb, int dsize,
938 struct trace_event_file *trace_file)
940 struct uprobe_trace_entry_head *entry;
941 struct ring_buffer_event *event;
942 struct ring_buffer *buffer;
943 void *data;
944 int size, esize;
945 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
947 WARN_ON(call != trace_file->event_call);
949 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
950 return;
952 if (trace_trigger_soft_disabled(trace_file))
953 return;
955 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
956 size = esize + tu->tp.size + dsize;
957 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
958 call->event.type, size, 0, 0);
959 if (!event)
960 return;
962 entry = ring_buffer_event_data(event);
963 if (is_ret_probe(tu)) {
964 entry->vaddr[0] = func;
965 entry->vaddr[1] = instruction_pointer(regs);
966 data = DATAOF_TRACE_ENTRY(entry, true);
967 } else {
968 entry->vaddr[0] = instruction_pointer(regs);
969 data = DATAOF_TRACE_ENTRY(entry, false);
972 memcpy(data, ucb->buf, tu->tp.size + dsize);
974 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
977 /* uprobe handler */
978 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
979 struct uprobe_cpu_buffer *ucb, int dsize)
981 struct event_file_link *link;
983 if (is_ret_probe(tu))
984 return 0;
986 rcu_read_lock();
987 trace_probe_for_each_link_rcu(link, &tu->tp)
988 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
989 rcu_read_unlock();
991 return 0;
994 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
995 struct pt_regs *regs,
996 struct uprobe_cpu_buffer *ucb, int dsize)
998 struct event_file_link *link;
1000 rcu_read_lock();
1001 trace_probe_for_each_link_rcu(link, &tu->tp)
1002 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1003 rcu_read_unlock();
1006 /* Event entry printers */
1007 static enum print_line_t
1008 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1010 struct uprobe_trace_entry_head *entry;
1011 struct trace_seq *s = &iter->seq;
1012 struct trace_uprobe *tu;
1013 u8 *data;
1015 entry = (struct uprobe_trace_entry_head *)iter->ent;
1016 tu = trace_uprobe_primary_from_call(
1017 container_of(event, struct trace_event_call, event));
1018 if (unlikely(!tu))
1019 goto out;
1021 if (is_ret_probe(tu)) {
1022 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1023 trace_probe_name(&tu->tp),
1024 entry->vaddr[1], entry->vaddr[0]);
1025 data = DATAOF_TRACE_ENTRY(entry, true);
1026 } else {
1027 trace_seq_printf(s, "%s: (0x%lx)",
1028 trace_probe_name(&tu->tp),
1029 entry->vaddr[0]);
1030 data = DATAOF_TRACE_ENTRY(entry, false);
1033 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1034 goto out;
1036 trace_seq_putc(s, '\n');
1038 out:
1039 return trace_handle_return(s);
1042 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1043 enum uprobe_filter_ctx ctx,
1044 struct mm_struct *mm);
1046 static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1048 int ret;
1050 tu->consumer.filter = filter;
1051 tu->inode = d_real_inode(tu->path.dentry);
1053 if (tu->ref_ctr_offset)
1054 ret = uprobe_register_refctr(tu->inode, tu->offset,
1055 tu->ref_ctr_offset, &tu->consumer);
1056 else
1057 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1059 if (ret)
1060 tu->inode = NULL;
1062 return ret;
1065 static void __probe_event_disable(struct trace_probe *tp)
1067 struct trace_probe *pos;
1068 struct trace_uprobe *tu;
1070 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1071 tu = container_of(pos, struct trace_uprobe, tp);
1072 if (!tu->inode)
1073 continue;
1075 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1077 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1078 tu->inode = NULL;
1082 static int probe_event_enable(struct trace_event_call *call,
1083 struct trace_event_file *file, filter_func_t filter)
1085 struct trace_probe *pos, *tp;
1086 struct trace_uprobe *tu;
1087 bool enabled;
1088 int ret;
1090 tp = trace_probe_primary_from_call(call);
1091 if (WARN_ON_ONCE(!tp))
1092 return -ENODEV;
1093 enabled = trace_probe_is_enabled(tp);
1095 /* This may also change "enabled" state */
1096 if (file) {
1097 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1098 return -EINTR;
1100 ret = trace_probe_add_file(tp, file);
1101 if (ret < 0)
1102 return ret;
1103 } else {
1104 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1105 return -EINTR;
1107 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1110 tu = container_of(tp, struct trace_uprobe, tp);
1111 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1113 if (enabled)
1114 return 0;
1116 ret = uprobe_buffer_enable();
1117 if (ret)
1118 goto err_flags;
1120 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1121 tu = container_of(pos, struct trace_uprobe, tp);
1122 ret = trace_uprobe_enable(tu, filter);
1123 if (ret) {
1124 __probe_event_disable(tp);
1125 goto err_buffer;
1129 return 0;
1131 err_buffer:
1132 uprobe_buffer_disable();
1134 err_flags:
1135 if (file)
1136 trace_probe_remove_file(tp, file);
1137 else
1138 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1140 return ret;
1143 static void probe_event_disable(struct trace_event_call *call,
1144 struct trace_event_file *file)
1146 struct trace_probe *tp;
1148 tp = trace_probe_primary_from_call(call);
1149 if (WARN_ON_ONCE(!tp))
1150 return;
1152 if (!trace_probe_is_enabled(tp))
1153 return;
1155 if (file) {
1156 if (trace_probe_remove_file(tp, file) < 0)
1157 return;
1159 if (trace_probe_is_enabled(tp))
1160 return;
1161 } else
1162 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1164 __probe_event_disable(tp);
1165 uprobe_buffer_disable();
1168 static int uprobe_event_define_fields(struct trace_event_call *event_call)
1170 int ret, size;
1171 struct uprobe_trace_entry_head field;
1172 struct trace_uprobe *tu;
1174 tu = trace_uprobe_primary_from_call(event_call);
1175 if (unlikely(!tu))
1176 return -ENODEV;
1178 if (is_ret_probe(tu)) {
1179 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1180 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1181 size = SIZEOF_TRACE_ENTRY(true);
1182 } else {
1183 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1184 size = SIZEOF_TRACE_ENTRY(false);
1187 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1190 #ifdef CONFIG_PERF_EVENTS
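/*
 * perf attaches either to a specific task or system-wide.  The filter
 * below tracks both: per-task events are kept on filter->perf_events and
 * matched by mm, while events without a task target bump nr_systemwide,
 * which short-circuits the check.  uprobe_apply() is only called when the
 * set of interesting mm's actually changes.
 */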
1191 static bool
1192 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1194 struct perf_event *event;
1196 if (filter->nr_systemwide)
1197 return true;
1199 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1200 if (event->hw.target->mm == mm)
1201 return true;
1204 return false;
1207 static inline bool
1208 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1210 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1213 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1215 bool done;
1217 write_lock(&tu->filter.rwlock);
1218 if (event->hw.target) {
1219 list_del(&event->hw.tp_list);
1220 done = tu->filter.nr_systemwide ||
1221 (event->hw.target->flags & PF_EXITING) ||
1222 uprobe_filter_event(tu, event);
1223 } else {
1224 tu->filter.nr_systemwide--;
1225 done = tu->filter.nr_systemwide;
1227 write_unlock(&tu->filter.rwlock);
1229 if (!done)
1230 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1232 return 0;
1235 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1237 bool done;
1238 int err;
1240 write_lock(&tu->filter.rwlock);
1241 if (event->hw.target) {
1243 * event->parent != NULL means copy_process(), we can avoid
1244 * uprobe_apply(). current->mm must be probed and we can rely
1245 * on dup_mmap() which preserves the already installed bp's.
1247 * attr.enable_on_exec means that exec/mmap will install the
1248 * breakpoints we need.
1250 done = tu->filter.nr_systemwide ||
1251 event->parent || event->attr.enable_on_exec ||
1252 uprobe_filter_event(tu, event);
1253 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1254 } else {
1255 done = tu->filter.nr_systemwide;
1256 tu->filter.nr_systemwide++;
1258 write_unlock(&tu->filter.rwlock);
1260 err = 0;
1261 if (!done) {
1262 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1263 if (err)
1264 uprobe_perf_close(tu, event);
1266 return err;
1269 static int uprobe_perf_multi_call(struct trace_event_call *call,
1270 struct perf_event *event,
1271 int (*op)(struct trace_uprobe *tu, struct perf_event *event))
1273 struct trace_probe *pos, *tp;
1274 struct trace_uprobe *tu;
1275 int ret = 0;
1277 tp = trace_probe_primary_from_call(call);
1278 if (WARN_ON_ONCE(!tp))
1279 return -ENODEV;
1281 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1282 tu = container_of(pos, struct trace_uprobe, tp);
1283 ret = op(tu, event);
1284 if (ret)
1285 break;
1288 return ret;
1290 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1291 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1293 struct trace_uprobe *tu;
1294 int ret;
1296 tu = container_of(uc, struct trace_uprobe, consumer);
1297 read_lock(&tu->filter.rwlock);
1298 ret = __uprobe_perf_filter(&tu->filter, mm);
1299 read_unlock(&tu->filter.rwlock);
1301 return ret;
1304 static void __uprobe_perf_func(struct trace_uprobe *tu,
1305 unsigned long func, struct pt_regs *regs,
1306 struct uprobe_cpu_buffer *ucb, int dsize)
1308 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1309 struct uprobe_trace_entry_head *entry;
1310 struct hlist_head *head;
1311 void *data;
1312 int size, esize;
1313 int rctx;
1315 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1316 return;
1318 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1320 size = esize + tu->tp.size + dsize;
1321 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1322 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1323 return;
1325 preempt_disable();
1326 head = this_cpu_ptr(call->perf_events);
1327 if (hlist_empty(head))
1328 goto out;
1330 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1331 if (!entry)
1332 goto out;
1334 if (is_ret_probe(tu)) {
1335 entry->vaddr[0] = func;
1336 entry->vaddr[1] = instruction_pointer(regs);
1337 data = DATAOF_TRACE_ENTRY(entry, true);
1338 } else {
1339 entry->vaddr[0] = instruction_pointer(regs);
1340 data = DATAOF_TRACE_ENTRY(entry, false);
1343 memcpy(data, ucb->buf, tu->tp.size + dsize);
1345 if (size - esize > tu->tp.size + dsize) {
1346 int len = tu->tp.size + dsize;
1348 memset(data + len, 0, size - esize - len);
1351 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1352 head, NULL);
1353 out:
1354 preempt_enable();
1357 /* uprobe profile handler */
1358 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1359 struct uprobe_cpu_buffer *ucb, int dsize)
1361 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1362 return UPROBE_HANDLER_REMOVE;
1364 if (!is_ret_probe(tu))
1365 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1366 return 0;
1369 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1370 struct pt_regs *regs,
1371 struct uprobe_cpu_buffer *ucb, int dsize)
1373 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1376 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1377 const char **filename, u64 *probe_offset,
1378 bool perf_type_tracepoint)
1380 const char *pevent = trace_event_name(event->tp_event);
1381 const char *group = event->tp_event->class->system;
1382 struct trace_uprobe *tu;
1384 if (perf_type_tracepoint)
1385 tu = find_probe_event(pevent, group);
1386 else
1387 tu = event->tp_event->data;
1388 if (!tu)
1389 return -EINVAL;
1391 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1392 : BPF_FD_TYPE_UPROBE;
1393 *filename = tu->filename;
1394 *probe_offset = tu->offset;
1395 return 0;
1397 #endif /* CONFIG_PERF_EVENTS */
1399 static int
1400 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1401 void *data)
1403 struct trace_event_file *file = data;
1405 switch (type) {
1406 case TRACE_REG_REGISTER:
1407 return probe_event_enable(event, file, NULL);
1409 case TRACE_REG_UNREGISTER:
1410 probe_event_disable(event, file);
1411 return 0;
1413 #ifdef CONFIG_PERF_EVENTS
1414 case TRACE_REG_PERF_REGISTER:
1415 return probe_event_enable(event, NULL, uprobe_perf_filter);
1417 case TRACE_REG_PERF_UNREGISTER:
1418 probe_event_disable(event, NULL);
1419 return 0;
1421 case TRACE_REG_PERF_OPEN:
1422 return uprobe_perf_multi_call(event, data, uprobe_perf_open);
1424 case TRACE_REG_PERF_CLOSE:
1425 return uprobe_perf_multi_call(event, data, uprobe_perf_close);
1427 #endif
1428 default:
1429 return 0;
1431 return 0;
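/*
 * Breakpoint-hit dispatcher: count the hit, stash dispatch data for
 * FETCH_OP_FOFFS translation, copy the fetched arguments into a per-cpu
 * buffer, and feed the ftrace and/or perf handlers depending on which
 * TP_FLAG_* bits are set on the trace_probe.
 */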
1434 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1436 struct trace_uprobe *tu;
1437 struct uprobe_dispatch_data udd;
1438 struct uprobe_cpu_buffer *ucb;
1439 int dsize, esize;
1440 int ret = 0;
1443 tu = container_of(con, struct trace_uprobe, consumer);
1444 tu->nhit++;
1446 udd.tu = tu;
1447 udd.bp_addr = instruction_pointer(regs);
1449 current->utask->vaddr = (unsigned long) &udd;
1451 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1452 return 0;
1454 dsize = __get_data_size(&tu->tp, regs);
1455 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1457 ucb = uprobe_buffer_get();
1458 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1460 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1461 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1463 #ifdef CONFIG_PERF_EVENTS
1464 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1465 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1466 #endif
1467 uprobe_buffer_put(ucb);
1468 return ret;
1471 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1472 unsigned long func, struct pt_regs *regs)
1474 struct trace_uprobe *tu;
1475 struct uprobe_dispatch_data udd;
1476 struct uprobe_cpu_buffer *ucb;
1477 int dsize, esize;
1479 tu = container_of(con, struct trace_uprobe, consumer);
1481 udd.tu = tu;
1482 udd.bp_addr = func;
1484 current->utask->vaddr = (unsigned long) &udd;
1486 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1487 return 0;
1489 dsize = __get_data_size(&tu->tp, regs);
1490 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1492 ucb = uprobe_buffer_get();
1493 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1495 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1496 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1498 #ifdef CONFIG_PERF_EVENTS
1499 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1500 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1501 #endif
1502 uprobe_buffer_put(ucb);
1503 return 0;
1506 static struct trace_event_functions uprobe_funcs = {
1507 .trace = print_uprobe_event
1510 static inline void init_trace_event_call(struct trace_uprobe *tu)
1512 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1514 call->event.funcs = &uprobe_funcs;
1515 call->class->define_fields = uprobe_event_define_fields;
1517 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1518 call->class->reg = trace_uprobe_register;
1521 static int register_uprobe_event(struct trace_uprobe *tu)
1523 init_trace_event_call(tu);
1525 return trace_probe_register_event_call(&tu->tp);
1528 static int unregister_uprobe_event(struct trace_uprobe *tu)
1530 return trace_probe_unregister_event_call(&tu->tp);
1533 #ifdef CONFIG_PERF_EVENTS
1534 struct trace_event_call *
1535 create_local_trace_uprobe(char *name, unsigned long offs,
1536 unsigned long ref_ctr_offset, bool is_return)
1538 struct trace_uprobe *tu;
1539 struct path path;
1540 int ret;
1542 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1543 if (ret)
1544 return ERR_PTR(ret);
1546 if (!d_is_reg(path.dentry)) {
1547 path_put(&path);
1548 return ERR_PTR(-EINVAL);
1552 * local trace_uprobes are not added to dyn_event, so they are never
1553 * searched in find_probe_event(). Therefore, there is no concern of
1554 * duplicated name "DUMMY_EVENT" here.
1556 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1557 is_return);
1559 if (IS_ERR(tu)) {
1560 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1561 (int)PTR_ERR(tu));
1562 path_put(&path);
1563 return ERR_CAST(tu);
1566 tu->offset = offs;
1567 tu->path = path;
1568 tu->ref_ctr_offset = ref_ctr_offset;
1569 tu->filename = kstrdup(name, GFP_KERNEL);
1570 init_trace_event_call(tu);
1572 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1573 ret = -ENOMEM;
1574 goto error;
1577 return trace_probe_event_call(&tu->tp);
1578 error:
1579 free_trace_uprobe(tu);
1580 return ERR_PTR(ret);
1583 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1585 struct trace_uprobe *tu;
1587 tu = trace_uprobe_primary_from_call(event_call);
1589 free_trace_uprobe(tu);
1591 #endif /* CONFIG_PERF_EVENTS */
1593 /* Make a trace interface for controlling probe points */
1594 static __init int init_uprobe_trace(void)
1596 struct dentry *d_tracer;
1597 int ret;
1599 ret = dyn_event_register(&trace_uprobe_ops);
1600 if (ret)
1601 return ret;
1603 d_tracer = tracing_init_dentry();
1604 if (IS_ERR(d_tracer))
1605 return 0;
1607 trace_create_file("uprobe_events", 0644, d_tracer,
1608 NULL, &uprobe_events_ops);
1609 /* Profile interface */
1610 trace_create_file("uprobe_profile", 0444, d_tracer,
1611 NULL, &uprobe_profile_ops);
1612 return 0;
1615 fs_initcall(init_uprobe_trace);