/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * than "mod" or "eval_string"
         */
        union trace_eval_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
        struct trace_eval_map           map;
        struct trace_eval_map_head      head;
        struct trace_eval_map_tail      tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);
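/*
 * Quick reference for the __setup() handlers above (all documented in
 * Documentation/admin-guide/kernel-parameters.txt). A boot command line
 * such as, for example:
 *
 *      ftrace=function ftrace_dump_on_oops trace_clock=global tp_printk
 *
 * selects the boot-up tracer, enables the dump-on-oops behaviour,
 * picks the trace clock and pipes tracepoints to printk as described
 * by the handlers above. This note is editorial; the option names are
 * the real ones, the combination shown is only an illustration.
 */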
unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
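/*
 * Illustrative sketch (not part of this file): the three helpers above are
 * designed to slot into a seq_file iterator. A caller that owns a
 * trace_pid_list can wire them up roughly like this, where "my_pid_list"
 * and the *_wrap names are hypothetical and the stop callback would
 * typically just drop whatever locks the start callback took:
 *
 *      static void *p_start_wrap(struct seq_file *m, loff_t *pos)
 *      {
 *              return trace_pid_start(my_pid_list, pos);
 *      }
 *      static void *p_next_wrap(struct seq_file *m, void *v, loff_t *pos)
 *      {
 *              return trace_pid_next(my_pid_list, v, pos);
 *      }
 *      static const struct seq_operations my_pid_seq_ops = {
 *              .start  = p_start_wrap,
 *              .next   = p_next_wrap,
 *              .stop   = p_stop_wrap,
 *              .show   = trace_pid_show,
 *      };
 */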
/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list) {
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
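/*
 * Illustrative note: trace_pid_write() backs the pid-filter files under
 * tracefs, e.g. the documented interface (the mount point may differ):
 *
 *      # echo 1234 4321 > /sys/kernel/debug/tracing/set_event_pid
 *      # echo > /sys/kernel/debug/tracing/set_event_pid
 *
 * The first write builds a brand new pid_list as described above and the
 * caller swaps it in; a write with no pids clears the filter entirely.
 */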
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long           trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer           *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
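/*
 * Typical read-side pattern for the primitives above (sketch only; "iter"
 * stands for whatever per-reader state the caller keeps):
 *
 *      trace_access_lock(iter->cpu_file);
 *      ... consume events from that cpu buffer, or from all of them when
 *          iter->cpu_file == RING_BUFFER_ALL_CPUS ...
 *      trace_access_unlock(iter->cpu_file);
 */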
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
                                        unsigned long flags,
                                        int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned long flags, int pc)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, flags, pc);
        ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
                            int type, unsigned long len,
                            unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, flags, pc);

        return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
        } else
                ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
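/*
 * Note: kernel code normally reaches __trace_puts() through the
 * trace_puts() macro, which picks __trace_bputs() or __trace_puts()
 * depending on whether the string is a compile-time constant, e.g.:
 *
 *      trace_puts("reached the scheduler hook\n");
 */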
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip  = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr)
{
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here! ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}

/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer, instead we resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = tracing_alloc_snapshot_instance(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
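/*
 * Illustrative usage (sketch): a caller that wants to capture the trace at
 * a rare event typically pairs the two calls above:
 *
 *      tracing_snapshot_alloc();       called once, from a context that may sleep
 *      ...
 *      tracing_snapshot();             called at the point of interest
 *
 * The same functionality is available from user space via the tracefs
 * "snapshot" file mentioned in the kerneldoc above.
 */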
void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
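/*
 * Illustrative usage (sketch): tracing_off()/tracing_on() are handy for
 * freezing the buffer around a condition being debugged, e.g.
 *
 *      if (suspicious_condition)
 *              tracing_off();
 *
 * where "suspicious_condition" stands for whatever the caller is chasing.
 * The frozen buffer can then be read from the tracefs "trace" file.
 */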
void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
};
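/*
 * Note: the clock used by the ring buffer is selectable at run time
 * through the tracefs "trace_clock" file (documented interface), e.g.:
 *
 *      # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is the default. Clocks whose in_ns field is 0 above are reported
 * in raw counts rather than nanoseconds.
 */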
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
        parser->buffer = NULL;
}
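/*
 * Typical lifecycle of a trace_parser (sketch, mirroring trace_pid_write()
 * earlier in this file): init once, loop over trace_get_user() until the
 * user buffer is drained, then put:
 *
 *      struct trace_parser parser;
 *
 *      if (trace_parser_get_init(&parser, MAX_TOKEN + 1))
 *              return -ENOMEM;
 *      while (cnt > 0) {
 *              ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *              ...
 *      }
 *      trace_parser_put(&parser);
 *
 * MAX_TOKEN here is a placeholder for whatever token size the caller needs.
 */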
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                parser->idx = 0;

                /* only spaces were written */
                if (isspace(ch) || !ch) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }
        }

        /* read the non-space input */
        while (cnt && !isspace(ch) && ch) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch) || !ch) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        /* Inherit the recordable setting from trace_buffer */
        if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
        else
                ring_buffer_record_off(tr->max_buffer.buffer);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;

        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
        struct list_head                list;
        struct tracer                   *type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
        struct trace_selftests *selftest;

        selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
        if (!selftest)
                return -ENOMEM;

        selftest->type = type;
        list_add(&selftest->list, &postponed_selftests);
        return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * If a tracer registers early in boot up (before scheduling is
         * initialized and such), then do not run its selftests yet.
         * Instead, run it a little later in the boot process.
         */
        if (!selftests_can_run)
                return save_selftest(type);

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}

static __init int init_trace_selftests(void)
{
        struct trace_selftests *p, *n;
        struct tracer *t, **last;
        int ret;

        selftests_can_run = true;

        mutex_lock(&trace_types_lock);

        if (list_empty(&postponed_selftests))
                goto out;

        pr_info("Running postponed tracer tests:\n");

        list_for_each_entry_safe(p, n, &postponed_selftests, list) {
                ret = run_tracer_selftest(p->type);
                /* If the test fails, then warn and remove from available_tracers */
                if (ret < 0) {
                        WARN(1, "tracer: %s failed selftest, disabling\n",
                             p->type->name);
                        last = &trace_types;
                        for (t = trace_types; t; t = t->next) {
                                if (t == p->type) {
                                        *last = t->next;
                                        break;
                                }
                                last = &t->next;
                        }
                }
                list_del(&p->list);
                kfree(p);
        }

 out:
        mutex_unlock(&trace_types_lock);

        return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags) {
                /* allocate a dummy tracer_flags */
                type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
                if (!type->flags) {
                        ret = -ENOMEM;
                        goto out;
                }
                type->flags->val = 0;
                type->flags->opts = dummy_tracer_opt;
        } else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        /* store the tracer for __set_tracer_option */
        type->flags->trace = type;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;
        add_tracer_options(&global_trace, type);

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;

        apply_trace_boot_options();

        /* disable other selftests, since this will break it. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
        struct ring_buffer *buffer = buf->buffer;
        int cpu;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
        struct trace_array *tr;

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (!tr->clear_trace)
                        continue;
                tr->clear_trace = false;
                tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
}
static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
        unsigned *map_cmdline_to_pid;
        unsigned cmdline_num;
        int cmdline_idx;
        char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
        memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
                                    struct saved_cmdlines_buffer *s)
{
        s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
                                        GFP_KERNEL);
        if (!s->map_cmdline_to_pid)
                return -ENOMEM;

        s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
        if (!s->saved_cmdlines) {
                kfree(s->map_cmdline_to_pid);
                return -ENOMEM;
        }

        s->cmdline_idx = 0;
        s->cmdline_num = val;
        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
               sizeof(s->map_pid_to_cmdline));
        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
               val * sizeof(*s->map_cmdline_to_pid));

        return 0;
}

static int trace_create_savedcmd(void)
{
        int ret;

        savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
        if (!savedcmd)
                return -ENOMEM;

        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
        if (ret < 0) {
                kfree(savedcmd);
                savedcmd = NULL;
                return -ENOMEM;
        }

        return 0;
}
int is_tracing_stopped(void)
{
        return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (--global_trace.stop_count) {
                if (global_trace.stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        global_trace.stop_count = 0;
                }
                goto out;
        }

        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
#endif

        arch_spin_unlock(&global_trace.max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        /* If global, we need to also start the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_start();

        raw_spin_lock_irqsave(&tr->start_lock, flags);

        if (--tr->stop_count) {
                if (tr->stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        tr->stop_count = 0;
                }
                goto out;
        }

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;

        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);

        buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
#endif

        arch_spin_unlock(&global_trace.max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        /* If global, we need to also stop the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_stop();

        raw_spin_lock_irqsave(&tr->start_lock, flags);
        if (tr->stop_count++)
                goto out;

        buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
static int trace_save_cmdline(struct task_struct *tsk)
{
        unsigned pid, idx;

        /* treat recording of idle task as a success */
        if (!tsk->pid)
                return 1;

        if (unlikely(tsk->pid > PID_MAX_DEFAULT))
                return 0;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!arch_spin_trylock(&trace_cmdline_lock))
                return 0;

        idx = savedcmd->map_pid_to_cmdline[tsk->pid];
        if (idx == NO_CMDLINE_MAP) {
                idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

                /*
                 * Check whether the cmdline buffer at idx has a pid
                 * mapped. We are going to overwrite that entry so we
                 * need to clear the map_pid_to_cmdline. Otherwise we
                 * would read the new comm for the old pid.
                 */
                pid = savedcmd->map_cmdline_to_pid[idx];
                if (pid != NO_CMDLINE_MAP)
                        savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

                savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
                savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

                savedcmd->cmdline_idx = idx;
        }

        set_cmdline(idx, tsk->comm);

        arch_spin_unlock(&trace_cmdline_lock);

        return 1;
}
static void __trace_find_cmdline(int pid, char comm[])
{
        unsigned map;

        if (!pid) {
                strcpy(comm, "<idle>");
                return;
        }

        if (WARN_ON_ONCE(pid < 0)) {
                strcpy(comm, "<XXX>");
                return;
        }

        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
        }

        map = savedcmd->map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
        else
                strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);

        __trace_find_cmdline(pid, comm);

        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

int trace_find_tgid(int pid)
{
        if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
                return 0;

        return tgid_map[pid];
}

static int trace_save_tgid(struct task_struct *tsk)
{
        /* treat recording of idle task as a success */
        if (!tsk->pid)
                return 1;

        if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
                return 0;

        tgid_map[tsk->pid] = tsk->tgid;
        return 1;
}
static bool tracing_record_taskinfo_skip(int flags)
{
        if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
                return true;
        if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
                return true;
        if (!__this_cpu_read(trace_taskinfo_save))
                return true;
        return false;
}

/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task  - task to record
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *        - TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
        bool done;

        if (tracing_record_taskinfo_skip(flags))
                return;

        /*
         * Record as much task information as possible. If some fail, continue
         * to try to record the others.
         */
        done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

        /* If recording any information failed, retry again soon. */
        if (!done)
                return;

        __this_cpu_write(trace_taskinfo_save, false);
}

/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev - previous task during sched_switch
 * @next - next task during sched_switch
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *          TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags)
{
        bool done;

        if (tracing_record_taskinfo_skip(flags))
                return;

        /*
         * Record as much task information as possible. If some fail, continue
         * to try to record the others.
         */
        done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
        done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

        /* If recording any information failed, retry again soon. */
        if (!done)
                return;

        __this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
        tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
        tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
        return trace_seq_has_overflowed(s) ?
                TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
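/*
 * Illustrative use of trace_handle_return() (sketch): an event's output
 * callback can end with it so that a full trace_seq is reported as a
 * partial line, e.g.:
 *
 *      static enum print_line_t my_event_print(struct trace_iterator *iter,
 *                                              int flags,
 *                                              struct trace_event *event)
 *      {
 *              trace_seq_printf(&iter->seq, "...");
 *              return trace_handle_return(&iter->seq);
 *      }
 *
 * "my_event_print" is a hypothetical name; real examples of this pattern
 * live in kernel/trace/trace_output.c.
 */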
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
{
        return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;
/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
        struct ring_buffer_event *event;
        struct page *page;
        int cpu;

        WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

        if (trace_buffered_event_ref++)
                return;

        for_each_tracing_cpu(cpu) {
                page = alloc_pages_node(cpu_to_node(cpu),
                                        GFP_KERNEL | __GFP_NORETRY, 0);
                if (!page)
                        goto failed;

                event = page_address(page);
                memset(event, 0, sizeof(*event));

                per_cpu(trace_buffered_event, cpu) = event;

                preempt_disable();
                if (cpu == smp_processor_id() &&
                    this_cpu_read(trace_buffered_event) !=
                    per_cpu(trace_buffered_event, cpu))
                        WARN_ON_ONCE(1);
                preempt_enable();
        }

        return;
 failed:
        trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
        /* Probably not needed, but do it anyway */
        smp_rmb();
        this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
        this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
        int cpu;

        WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

        if (WARN_ON_ONCE(!trace_buffered_event_ref))
                return;

        if (--trace_buffered_event_ref)
                return;

        preempt_disable();
        /* For each CPU, set the buffer as used. */
        smp_call_function_many(tracing_buffer_mask,
                               disable_trace_buffered_event, NULL, 1);
        preempt_enable();

        /* Wait for all current users to finish */
        synchronize_sched();

        for_each_tracing_cpu(cpu) {
                free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
                per_cpu(trace_buffered_event, cpu) = NULL;
        }
        /*
         * Make sure trace_buffered_event is NULL before clearing
         * trace_buffered_event_cnt.
         */
        smp_wmb();

        preempt_disable();
        /* Do the work on each cpu */
        smp_call_function_many(tracing_buffer_mask,
                               enable_trace_buffered_event, NULL, 1);
        preempt_enable();
}
static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc)
{
        struct ring_buffer_event *entry;
        int val;

        *current_rb = trace_file->tr->trace_buffer.buffer;

        if ((trace_file->flags &
             (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
            (entry = this_cpu_read(trace_buffered_event))) {
                /* Try to use the per cpu buffer first */
                val = this_cpu_inc_return(trace_buffered_event_cnt);
                if (val == 1) {
                        trace_event_setup(entry, type, flags, pc);
                        entry->array[0] = len;
                        return entry;
                }
                this_cpu_dec(trace_buffered_event_cnt);
        }

        entry = __trace_buffer_lock_reserve(*current_rb,
                                            type, len, flags, pc);
        /*
         * If tracing is off, but we have triggers enabled
         * we still need to look at the event data. Use the temp_buffer
         * to store the trace event for the trigger to use. It's recursion
         * safe and will not be recorded anywhere.
         */
        if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
                *current_rb = temp_buffer;
                entry = __trace_buffer_lock_reserve(*current_rb,
                                                    type, len, flags, pc);
        }
        return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
        struct trace_event_call *event_call;
        struct trace_event *event;
        unsigned long flags;
        struct trace_iterator *iter = tracepoint_print_iter;

        /* We should never get here if iter is NULL */
        if (WARN_ON_ONCE(!iter))
                return;

        event_call = fbuffer->trace_file->event_call;
        if (!event_call || !event_call->event.funcs ||
            !event_call->event.funcs->trace)
                return;

        event = &fbuffer->trace_file->event_call->event;

        spin_lock_irqsave(&tracepoint_iter_lock, flags);
        trace_seq_init(&iter->seq);
        iter->ent = fbuffer->entry;
        event_call->event.funcs->trace(iter, 0, event);
        trace_seq_putc(&iter->seq, 0);
        printk("%s", iter->seq.buffer);

        spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int save_tracepoint_printk;
        int ret;

        mutex_lock(&tracepoint_printk_mutex);
        save_tracepoint_printk = tracepoint_printk;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        /*
         * This will force exiting early, as tracepoint_printk
         * is always zero when tracepoint_printk_iter is not allocated
         */
        if (!tracepoint_print_iter)
                tracepoint_printk = 0;

        if (save_tracepoint_printk == tracepoint_printk)
                goto out;

        if (tracepoint_printk)
                static_key_enable(&tracepoint_printk_key.key);
        else
                static_key_disable(&tracepoint_printk_key.key);

 out:
        mutex_unlock(&tracepoint_printk_mutex);

        return ret;
}
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}
static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}
void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);
		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}
static void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

static int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
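
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a module that wants a copy of every exported function-trace event can
 * supply a struct trace_export whose ->write() callback receives the raw
 * entry and its length. The names my_export_write/my_export below are
 * hypothetical.
 *
 *	static void my_export_write(const void *buf, unsigned int len)
 *	{
 *		// forward the raw trace entry to some transport
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */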
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&ftrace_exports_enabled))
			ftrace_exports(event);
		__buffer_unlock_commit(buffer, event);
	}
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
	if (!regs)
		trace.skip += 2;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * It is possible that a function is being traced in a
	 * location that RCU is not watching. A call to
	 * rcu_irq_enter() will make sure that it is, but there's
	 * a few internal rcu functions that could be traced
	 * where that won't work either. In those cases, we just
	 * do nothing.
	 */
	if (unlikely(rcu_irq_enter_disabled()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
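
/*
 * Usage sketch (illustration only, not from the original source): a
 * developer chasing a misbehaving code path can drop a one-off call into
 * the suspect function to record who called it, then read the result from
 * the "trace" file. The caller name below is hypothetical.
 *
 *	void my_suspect_function(void)
 *	{
 *		trace_dump_stack(0);	// record the current backtrace
 *	}
 */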
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	/* nesting counts active users (1..4), so index with nesting - 1 */
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
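
/*
 * Sketch of the intended calling pattern (added for illustration; the
 * helper names are the ones above): every successful get_trace_buf() must
 * be paired with a put_trace_buf() in the same context, which is what lets
 * an interrupt that arrives in between safely take the next nesting slot.
 *
 *	char *buf = get_trace_buf();
 *	if (buf) {
 *		// format at most TRACE_BUF_SIZE bytes into buf
 *		put_trace_buf();
 *	}
 */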
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->fmt			= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
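
/*
 * Illustrative note (added, not from the original source): the
 * trace_printk() helper declared in <linux/kernel.h> funnels into the
 * trace_vbprintk()/trace_vprintk() paths above, e.g.:
 *
 *	trace_printk("queue depth %d on cpu %d\n", depth, smp_processor_id());
 *
 * The resulting entries land in the ring buffer and are visible through
 * the "trace" and "trace_pipe" files.
 */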
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_taskinfo_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_taskinfo_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n",
		   tgid ? "TGID     " : "");
	seq_printf(m, "#              | |     %s    |       |         |\n",
		   tgid ? "  |      " : "");
}
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	const char tgid_space[] = "          ";
	const char space[] = "  ";

	print_event_info(buf, m);

	seq_printf(m, "#                          %s  _-----=> irqs-off\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s / _----=> need-resched\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s| / _---=> hardirq/softirq\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s|| / _--=> preempt-depth\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#                          %s||| /     delay\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "#           TASK-PID %sCPU#  ||||    TIMESTAMP  FUNCTION\n",
		   tgid ? "   TGID   " : space);
	seq_printf(m, "#              | |   %s  |   ||||       |         |\n",
		   tgid ? "     |    " : space);
}
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->trace_buffer, m,
						       trace_flags);
		}
	}
}
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}
#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer,
							 cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer,
						 cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true: false;
}
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static inline bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map)
			tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
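
/*
 * Example (illustration, not part of the original source): the buffer
 * parsed above is filled from the kernel command line, so boot-time
 * options can be set with something like:
 *
 *	trace_options=sym-addr,noirq-info
 *
 * Each comma-separated token is handed to trace_set_options() exactly as
 * if it had been written to the trace_options file at run time.
 */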
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t-change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name or glob-matching-pattern\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_KPROBE_EVENTS
	"  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t  accepts: event-definitions (one definition per line)\n"
	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
	"\t           -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"\t    place: <path>:<offset>\n"
#endif
	"\t     args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
	"\t           $stack<index>, $stack, $retval, $comm\n"
	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
	"\t           b<bit-width>@<bit-offset>/<container-size>\n"
#endif
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t            enable_hist:<system>:<event>\n"
	"\t            disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t    hist (see below)\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	"  hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t    Format: hist:keys=<field1[,field2,...]>\n"
	"\t            [:values=<field1[,field2,...]>]\n"
	"\t            [:sort=<field1[,field2,...]>]\n"
	"\t            [:size=#entries]\n"
	"\t            [:pause][:continue][:clear]\n"
	"\t            [:name=histname1]\n"
	"\t            [if <filter>]\n\n"
	"\t    When a matching event is hit, an entry is added to a hash\n"
	"\t    table using the key(s) and value(s) named, and the value of a\n"
	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
	"\t    correspond to fields in the event's format description.  Keys\n"
	"\t    can be any field, or the special string 'stacktrace'.\n"
	"\t    Compound keys consisting of up to two fields can be specified\n"
	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
	"\t    fields.  Sort keys consisting of up to two fields can be\n"
	"\t    specified using the 'sort' keyword.  The sort direction can\n"
	"\t    be modified by appending '.descending' or '.ascending' to a\n"
	"\t    sort field.  The 'size' parameter can be used to specify more\n"
	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
	"\t    its histogram data will be shared with other triggers of the\n"
	"\t    same name, and trigger hits will update this common data.\n\n"
	"\t    Reading the 'hist' file for the event will dump the hash\n"
	"\t    table in its entirety to stdout.  If there are multiple hist\n"
	"\t    triggers attached to an event, there will be a table for each\n"
	"\t    trigger in the output.  The table displayed for a named\n"
	"\t    trigger will be the same as any other instance having the\n"
	"\t    same name.  The default format used to display a given field\n"
	"\t    can be modified by appending any of the following modifiers\n"
	"\t    to the field name, as applicable:\n\n"
	"\t            .hex        display a number as a hex value\n"
	"\t            .sym        display an address as a symbol\n"
	"\t            .sym-offset display an address as a symbol and offset\n"
	"\t            .execname   display a common_pid as a program name\n"
	"\t            .syscall    display a syscall id as a syscall name\n"
	"\t            .log2       display log2 value rather than raw number\n\n"
	"\t    The 'pause' parameter can be used to pause an existing hist\n"
	"\t    trigger or to start a hist trigger but not log any events\n"
	"\t    until told to do so.  'continue' can be used to start or\n"
	"\t    restart a paused hist trigger.\n\n"
	"\t    The 'clear' parameter will clear the contents of a running\n"
	"\t    hist trigger and leave its current paused/active state\n"
	"\t    unchanged.\n\n"
	"\t    The enable_hist and disable_hist triggers can be used to\n"
	"\t    have one event conditionally start and stop another event's\n"
	"\t    already-attached hist trigger.  The syntax is analogous to\n"
	"\t    the enable_event and disable_event triggers.\n"
#endif
;
4734 tracing_readme_read(struct file
*filp
, char __user
*ubuf
,
4735 size_t cnt
, loff_t
*ppos
)
4737 return simple_read_from_buffer(ubuf
, cnt
, ppos
,
4738 readme_msg
, strlen(readme_msg
));
4741 static const struct file_operations tracing_readme_fops
= {
4742 .open
= tracing_open_generic
,
4743 .read
= tracing_readme_read
,
4744 .llseek
= generic_file_llseek
,
4747 static void *saved_tgids_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4751 if (*pos
|| m
->count
)
4756 for (; ptr
<= &tgid_map
[PID_MAX_DEFAULT
]; ptr
++) {
4757 if (trace_find_tgid(*ptr
))
4764 static void *saved_tgids_start(struct seq_file
*m
, loff_t
*pos
)
4774 v
= saved_tgids_next(m
, v
, &l
);
4782 static void saved_tgids_stop(struct seq_file
*m
, void *v
)
4786 static int saved_tgids_show(struct seq_file
*m
, void *v
)
4788 int pid
= (int *)v
- tgid_map
;
4790 seq_printf(m
, "%d %d\n", pid
, trace_find_tgid(pid
));
4794 static const struct seq_operations tracing_saved_tgids_seq_ops
= {
4795 .start
= saved_tgids_start
,
4796 .stop
= saved_tgids_stop
,
4797 .next
= saved_tgids_next
,
4798 .show
= saved_tgids_show
,
4801 static int tracing_saved_tgids_open(struct inode
*inode
, struct file
*filp
)
4803 if (tracing_disabled
)
4806 return seq_open(filp
, &tracing_saved_tgids_seq_ops
);
4810 static const struct file_operations tracing_saved_tgids_fops
= {
4811 .open
= tracing_saved_tgids_open
,
4813 .llseek
= seq_lseek
,
4814 .release
= seq_release
,
4817 static void *saved_cmdlines_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4819 unsigned int *ptr
= v
;
4821 if (*pos
|| m
->count
)
4826 for (; ptr
< &savedcmd
->map_cmdline_to_pid
[savedcmd
->cmdline_num
];
4828 if (*ptr
== -1 || *ptr
== NO_CMDLINE_MAP
)
4837 static void *saved_cmdlines_start(struct seq_file
*m
, loff_t
*pos
)
4843 arch_spin_lock(&trace_cmdline_lock
);
4845 v
= &savedcmd
->map_cmdline_to_pid
[0];
4847 v
= saved_cmdlines_next(m
, v
, &l
);
4855 static void saved_cmdlines_stop(struct seq_file
*m
, void *v
)
4857 arch_spin_unlock(&trace_cmdline_lock
);
4861 static int saved_cmdlines_show(struct seq_file
*m
, void *v
)
4863 char buf
[TASK_COMM_LEN
];
4864 unsigned int *pid
= v
;
4866 __trace_find_cmdline(*pid
, buf
);
4867 seq_printf(m
, "%d %s\n", *pid
, buf
);
4871 static const struct seq_operations tracing_saved_cmdlines_seq_ops
= {
4872 .start
= saved_cmdlines_start
,
4873 .next
= saved_cmdlines_next
,
4874 .stop
= saved_cmdlines_stop
,
4875 .show
= saved_cmdlines_show
,
4878 static int tracing_saved_cmdlines_open(struct inode
*inode
, struct file
*filp
)
4880 if (tracing_disabled
)
4883 return seq_open(filp
, &tracing_saved_cmdlines_seq_ops
);
4886 static const struct file_operations tracing_saved_cmdlines_fops
= {
4887 .open
= tracing_saved_cmdlines_open
,
4889 .llseek
= seq_lseek
,
4890 .release
= seq_release
,
4894 tracing_saved_cmdlines_size_read(struct file
*filp
, char __user
*ubuf
,
4895 size_t cnt
, loff_t
*ppos
)
4900 arch_spin_lock(&trace_cmdline_lock
);
4901 r
= scnprintf(buf
, sizeof(buf
), "%u\n", savedcmd
->cmdline_num
);
4902 arch_spin_unlock(&trace_cmdline_lock
);
4904 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4907 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer
*s
)
4909 kfree(s
->saved_cmdlines
);
4910 kfree(s
->map_cmdline_to_pid
);
4914 static int tracing_resize_saved_cmdlines(unsigned int val
)
4916 struct saved_cmdlines_buffer
*s
, *savedcmd_temp
;
4918 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
4922 if (allocate_cmdlines_buffer(val
, s
) < 0) {
4927 arch_spin_lock(&trace_cmdline_lock
);
4928 savedcmd_temp
= savedcmd
;
4930 arch_spin_unlock(&trace_cmdline_lock
);
4931 free_saved_cmdlines_buffer(savedcmd_temp
);
4937 tracing_saved_cmdlines_size_write(struct file
*filp
, const char __user
*ubuf
,
4938 size_t cnt
, loff_t
*ppos
)
4943 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4947 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4948 if (!val
|| val
> PID_MAX_DEFAULT
)
4951 ret
= tracing_resize_saved_cmdlines((unsigned int)val
);
4960 static const struct file_operations tracing_saved_cmdlines_size_fops
= {
4961 .open
= tracing_open_generic
,
4962 .read
= tracing_saved_cmdlines_size_read
,
4963 .write
= tracing_saved_cmdlines_size_write
,
4966 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4967 static union trace_eval_map_item
*
4968 update_eval_map(union trace_eval_map_item
*ptr
)
4970 if (!ptr
->map
.eval_string
) {
4971 if (ptr
->tail
.next
) {
4972 ptr
= ptr
->tail
.next
;
4973 /* Set ptr to the next real item (skip head) */
4981 static void *eval_map_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4983 union trace_eval_map_item
*ptr
= v
;
4986 * Paranoid! If ptr points to end, we don't want to increment past it.
4987 * This really should never happen.
4989 ptr
= update_eval_map(ptr
);
4990 if (WARN_ON_ONCE(!ptr
))
4997 ptr
= update_eval_map(ptr
);
5002 static void *eval_map_start(struct seq_file
*m
, loff_t
*pos
)
5004 union trace_eval_map_item
*v
;
5007 mutex_lock(&trace_eval_mutex
);
5009 v
= trace_eval_maps
;
5013 while (v
&& l
< *pos
) {
5014 v
= eval_map_next(m
, v
, &l
);
5020 static void eval_map_stop(struct seq_file
*m
, void *v
)
5022 mutex_unlock(&trace_eval_mutex
);
5025 static int eval_map_show(struct seq_file
*m
, void *v
)
5027 union trace_eval_map_item
*ptr
= v
;
5029 seq_printf(m
, "%s %ld (%s)\n",
5030 ptr
->map
.eval_string
, ptr
->map
.eval_value
,
5036 static const struct seq_operations tracing_eval_map_seq_ops
= {
5037 .start
= eval_map_start
,
5038 .next
= eval_map_next
,
5039 .stop
= eval_map_stop
,
5040 .show
= eval_map_show
,
5043 static int tracing_eval_map_open(struct inode
*inode
, struct file
*filp
)
5045 if (tracing_disabled
)
5048 return seq_open(filp
, &tracing_eval_map_seq_ops
);
5051 static const struct file_operations tracing_eval_map_fops
= {
5052 .open
= tracing_eval_map_open
,
5054 .llseek
= seq_lseek
,
5055 .release
= seq_release
,
5058 static inline union trace_eval_map_item
*
5059 trace_eval_jmp_to_tail(union trace_eval_map_item
*ptr
)
5061 /* Return tail of array given the head */
5062 return ptr
+ ptr
->head
.length
+ 1;
5066 trace_insert_eval_map_file(struct module
*mod
, struct trace_eval_map
**start
,
5069 struct trace_eval_map
**stop
;
5070 struct trace_eval_map
**map
;
5071 union trace_eval_map_item
*map_array
;
5072 union trace_eval_map_item
*ptr
;
5077 * The trace_eval_maps contains the map plus a head and tail item,
5078 * where the head holds the module and length of array, and the
5079 * tail holds a pointer to the next list.
5081 map_array
= kmalloc(sizeof(*map_array
) * (len
+ 2), GFP_KERNEL
);
5083 pr_warn("Unable to allocate trace eval mapping\n");
5087 mutex_lock(&trace_eval_mutex
);
5089 if (!trace_eval_maps
)
5090 trace_eval_maps
= map_array
;
5092 ptr
= trace_eval_maps
;
5094 ptr
= trace_eval_jmp_to_tail(ptr
);
5095 if (!ptr
->tail
.next
)
5097 ptr
= ptr
->tail
.next
;
5100 ptr
->tail
.next
= map_array
;
5102 map_array
->head
.mod
= mod
;
5103 map_array
->head
.length
= len
;
5106 for (map
= start
; (unsigned long)map
< (unsigned long)stop
; map
++) {
5107 map_array
->map
= **map
;
5110 memset(map_array
, 0, sizeof(*map_array
));
5112 mutex_unlock(&trace_eval_mutex
);
5115 static void trace_create_eval_file(struct dentry
*d_tracer
)
5117 trace_create_file("eval_map", 0444, d_tracer
,
5118 NULL
, &tracing_eval_map_fops
);
5121 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5122 static inline void trace_create_eval_file(struct dentry
*d_tracer
) { }
5123 static inline void trace_insert_eval_map_file(struct module
*mod
,
5124 struct trace_eval_map
**start
, int len
) { }
5125 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5127 static void trace_insert_eval_map(struct module
*mod
,
5128 struct trace_eval_map
**start
, int len
)
5130 struct trace_eval_map
**map
;
5137 trace_event_eval_update(map
, len
);
5139 trace_insert_eval_map_file(mod
, start
, len
);
5143 tracing_set_trace_read(struct file
*filp
, char __user
*ubuf
,
5144 size_t cnt
, loff_t
*ppos
)
5146 struct trace_array
*tr
= filp
->private_data
;
5147 char buf
[MAX_TRACER_SIZE
+2];
5150 mutex_lock(&trace_types_lock
);
5151 r
= sprintf(buf
, "%s\n", tr
->current_trace
->name
);
5152 mutex_unlock(&trace_types_lock
);
5154 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5157 int tracer_init(struct tracer
*t
, struct trace_array
*tr
)
5159 tracing_reset_online_cpus(&tr
->trace_buffer
);
5163 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
)
5167 for_each_tracing_cpu(cpu
)
5168 per_cpu_ptr(buf
->data
, cpu
)->entries
= val
;
5171 #ifdef CONFIG_TRACER_MAX_TRACE
5172 /* resize @tr's buffer to the size of @size_tr's entries */
5173 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
5174 struct trace_buffer
*size_buf
, int cpu_id
)
5178 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
5179 for_each_tracing_cpu(cpu
) {
5180 ret
= ring_buffer_resize(trace_buf
->buffer
,
5181 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
5184 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
5185 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
5188 ret
= ring_buffer_resize(trace_buf
->buffer
,
5189 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
5191 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
5192 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
5197 #endif /* CONFIG_TRACER_MAX_TRACE */
5199 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
5200 unsigned long size
, int cpu
)
5205 * If kernel or user changes the size of the ring buffer
5206 * we use the size that was given, and we can forget about
5207 * expanding it later.
5209 ring_buffer_expanded
= true;
5211 /* May be called before buffers are initialized */
5212 if (!tr
->trace_buffer
.buffer
)
5215 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
5219 #ifdef CONFIG_TRACER_MAX_TRACE
5220 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
5221 !tr
->current_trace
->use_max_tr
)
5224 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
5226 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
5227 &tr
->trace_buffer
, cpu
);
5230 * AARGH! We are left with different
5231 * size max buffer!!!!
5232 * The max buffer is our "snapshot" buffer.
5233 * When a tracer needs a snapshot (one of the
5234 * latency tracers), it swaps the max buffer
5235 * with the saved snap shot. We succeeded to
5236 * update the size of the main buffer, but failed to
5237 * update the size of the max buffer. But when we tried
5238 * to reset the main buffer to the original size, we
5239 * failed there too. This is very unlikely to
5240 * happen, but if it does, warn and kill all
5244 tracing_disabled
= 1;
5249 if (cpu
== RING_BUFFER_ALL_CPUS
)
5250 set_buffer_entries(&tr
->max_buffer
, size
);
5252 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
5255 #endif /* CONFIG_TRACER_MAX_TRACE */
5257 if (cpu
== RING_BUFFER_ALL_CPUS
)
5258 set_buffer_entries(&tr
->trace_buffer
, size
);
5260 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
5265 static ssize_t
tracing_resize_ring_buffer(struct trace_array
*tr
,
5266 unsigned long size
, int cpu_id
)
5270 mutex_lock(&trace_types_lock
);
5272 if (cpu_id
!= RING_BUFFER_ALL_CPUS
) {
5273 /* make sure, this cpu is enabled in the mask */
5274 if (!cpumask_test_cpu(cpu_id
, tracing_buffer_mask
)) {
5280 ret
= __tracing_resize_ring_buffer(tr
, size
, cpu_id
);
5285 mutex_unlock(&trace_types_lock
);
5292 * tracing_update_buffers - used by tracing facility to expand ring buffers
5294 * To save on memory when the tracing is never used on a system with it
5295 * configured in. The ring buffers are set to a minimum size. But once
5296 * a user starts to use the tracing facility, then they need to grow
5297 * to their default size.
5299 * This function is to be called when a tracer is about to be used.
5301 int tracing_update_buffers(void)
5305 mutex_lock(&trace_types_lock
);
5306 if (!ring_buffer_expanded
)
5307 ret
= __tracing_resize_ring_buffer(&global_trace
, trace_buf_size
,
5308 RING_BUFFER_ALL_CPUS
);
5309 mutex_unlock(&trace_types_lock
);
5314 struct trace_option_dentry
;
5317 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
5320 * Used to clear out the tracer before deletion of an instance.
5321 * Must have trace_types_lock held.
5323 static void tracing_set_nop(struct trace_array
*tr
)
5325 if (tr
->current_trace
== &nop_trace
)
5328 tr
->current_trace
->enabled
--;
5330 if (tr
->current_trace
->reset
)
5331 tr
->current_trace
->reset(tr
);
5333 tr
->current_trace
= &nop_trace
;
5336 static void add_tracer_options(struct trace_array
*tr
, struct tracer
*t
)
5338 /* Only enable if the directory has been created already. */
5342 create_trace_option_files(tr
, t
);
5345 static int tracing_set_tracer(struct trace_array
*tr
, const char *buf
)
5348 #ifdef CONFIG_TRACER_MAX_TRACE
5353 mutex_lock(&trace_types_lock
);
5355 if (!ring_buffer_expanded
) {
5356 ret
= __tracing_resize_ring_buffer(tr
, trace_buf_size
,
5357 RING_BUFFER_ALL_CPUS
);
5363 for (t
= trace_types
; t
; t
= t
->next
) {
5364 if (strcmp(t
->name
, buf
) == 0)
5371 if (t
== tr
->current_trace
)
5374 /* Some tracers won't work on kernel command line */
5375 if (system_state
< SYSTEM_RUNNING
&& t
->noboot
) {
5376 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5381 /* Some tracers are only allowed for the top level buffer */
5382 if (!trace_ok_for_array(t
, tr
)) {
5387 /* If trace pipe files are being read, we can't change the tracer */
5388 if (tr
->current_trace
->ref
) {
5393 trace_branch_disable();
5395 tr
->current_trace
->enabled
--;
5397 if (tr
->current_trace
->reset
)
5398 tr
->current_trace
->reset(tr
);
5400 /* Current trace needs to be nop_trace before synchronize_sched */
5401 tr
->current_trace
= &nop_trace
;
5403 #ifdef CONFIG_TRACER_MAX_TRACE
5404 had_max_tr
= tr
->allocated_snapshot
;
5406 if (had_max_tr
&& !t
->use_max_tr
) {
5408 * We need to make sure that the update_max_tr sees that
5409 * current_trace changed to nop_trace to keep it from
5410 * swapping the buffers after we resize it.
5411 * The update_max_tr is called from interrupts disabled
5412 * so a synchronized_sched() is sufficient.
5414 synchronize_sched();
5419 #ifdef CONFIG_TRACER_MAX_TRACE
5420 if (t
->use_max_tr
&& !had_max_tr
) {
5421 ret
= tracing_alloc_snapshot_instance(tr
);
5428 ret
= tracer_init(t
, tr
);
5433 tr
->current_trace
= t
;
5434 tr
->current_trace
->enabled
++;
5435 trace_branch_enable(tr
);
5437 mutex_unlock(&trace_types_lock
);
5443 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
5444 size_t cnt
, loff_t
*ppos
)
5446 struct trace_array
*tr
= filp
->private_data
;
5447 char buf
[MAX_TRACER_SIZE
+1];
5454 if (cnt
> MAX_TRACER_SIZE
)
5455 cnt
= MAX_TRACER_SIZE
;
5457 if (copy_from_user(buf
, ubuf
, cnt
))
5462 /* strip ending whitespace. */
5463 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
5466 err
= tracing_set_tracer(tr
, buf
);
5476 tracing_nsecs_read(unsigned long *ptr
, char __user
*ubuf
,
5477 size_t cnt
, loff_t
*ppos
)
5482 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
5483 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
5484 if (r
> sizeof(buf
))
5486 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5490 tracing_nsecs_write(unsigned long *ptr
, const char __user
*ubuf
,
5491 size_t cnt
, loff_t
*ppos
)
5496 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5506 tracing_thresh_read(struct file
*filp
, char __user
*ubuf
,
5507 size_t cnt
, loff_t
*ppos
)
5509 return tracing_nsecs_read(&tracing_thresh
, ubuf
, cnt
, ppos
);
5513 tracing_thresh_write(struct file
*filp
, const char __user
*ubuf
,
5514 size_t cnt
, loff_t
*ppos
)
5516 struct trace_array
*tr
= filp
->private_data
;
5519 mutex_lock(&trace_types_lock
);
5520 ret
= tracing_nsecs_write(&tracing_thresh
, ubuf
, cnt
, ppos
);
5524 if (tr
->current_trace
->update_thresh
) {
5525 ret
= tr
->current_trace
->update_thresh(tr
);
5532 mutex_unlock(&trace_types_lock
);
5537 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5540 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
5541 size_t cnt
, loff_t
*ppos
)
5543 return tracing_nsecs_read(filp
->private_data
, ubuf
, cnt
, ppos
);
5547 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
5548 size_t cnt
, loff_t
*ppos
)
5550 return tracing_nsecs_write(filp
->private_data
, ubuf
, cnt
, ppos
);
5555 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
5557 struct trace_array
*tr
= inode
->i_private
;
5558 struct trace_iterator
*iter
;
5561 if (tracing_disabled
)
5564 if (trace_array_get(tr
) < 0)
5567 mutex_lock(&trace_types_lock
);
5569 /* create a buffer to store the information to pass to userspace */
5570 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
5573 __trace_array_put(tr
);
5577 trace_seq_init(&iter
->seq
);
5578 iter
->trace
= tr
->current_trace
;
5580 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
5585 /* trace pipe does not show start of buffer */
5586 cpumask_setall(iter
->started
);
5588 if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
5589 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
5591 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5592 if (trace_clocks
[tr
->clock_id
].in_ns
)
5593 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
5596 iter
->trace_buffer
= &tr
->trace_buffer
;
5597 iter
->cpu_file
= tracing_get_cpu(inode
);
5598 mutex_init(&iter
->mutex
);
5599 filp
->private_data
= iter
;
5601 if (iter
->trace
->pipe_open
)
5602 iter
->trace
->pipe_open(iter
);
5604 nonseekable_open(inode
, filp
);
5606 tr
->current_trace
->ref
++;
5608 mutex_unlock(&trace_types_lock
);
5613 __trace_array_put(tr
);
5614 mutex_unlock(&trace_types_lock
);
5618 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
5620 struct trace_iterator
*iter
= file
->private_data
;
5621 struct trace_array
*tr
= inode
->i_private
;
5623 mutex_lock(&trace_types_lock
);
5625 tr
->current_trace
->ref
--;
5627 if (iter
->trace
->pipe_close
)
5628 iter
->trace
->pipe_close(iter
);
5630 mutex_unlock(&trace_types_lock
);
5632 free_cpumask_var(iter
->started
);
5633 mutex_destroy(&iter
->mutex
);
5636 trace_array_put(tr
);
5642 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
5644 struct trace_array
*tr
= iter
->tr
;
5646 /* Iterators are static, they should be filled or empty */
5647 if (trace_buffer_iter(iter
, iter
->cpu_file
))
5648 return POLLIN
| POLLRDNORM
;
5650 if (tr
->trace_flags
& TRACE_ITER_BLOCK
)
5652 * Always select as readable when in blocking mode
5654 return POLLIN
| POLLRDNORM
;
5656 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
5661 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
5663 struct trace_iterator
*iter
= filp
->private_data
;
5665 return trace_poll(iter
, filp
, poll_table
);
5668 /* Must be called with iter->mutex held. */
5669 static int tracing_wait_pipe(struct file
*filp
)
5671 struct trace_iterator
*iter
= filp
->private_data
;
5674 while (trace_empty(iter
)) {
5676 if ((filp
->f_flags
& O_NONBLOCK
)) {
5681 * We block until we read something and tracing is disabled.
5682 * We still block if tracing is disabled, but we have never
5683 * read anything. This allows a user to cat this file, and
5684 * then enable tracing. But after we have read something,
5685 * we give an EOF when tracing is again disabled.
5687 * iter->pos will be 0 if we haven't read anything.
5689 if (!tracer_tracing_is_on(iter
->tr
) && iter
->pos
)
5692 mutex_unlock(&iter
->mutex
);
5694 ret
= wait_on_pipe(iter
, false);
5696 mutex_lock(&iter
->mutex
);
5709 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
5710 size_t cnt
, loff_t
*ppos
)
5712 struct trace_iterator
*iter
= filp
->private_data
;
5716 * Avoid more than one consumer on a single file descriptor
5717 * This is just a matter of traces coherency, the ring buffer itself
5720 mutex_lock(&iter
->mutex
);
5722 /* return any leftover data */
5723 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
5727 trace_seq_init(&iter
->seq
);
5729 if (iter
->trace
->read
) {
5730 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
5736 sret
= tracing_wait_pipe(filp
);
5740 /* stop when tracing is finished */
5741 if (trace_empty(iter
)) {
5746 if (cnt
>= PAGE_SIZE
)
5747 cnt
= PAGE_SIZE
- 1;
5749 /* reset all but tr, trace, and overruns */
5750 memset(&iter
->seq
, 0,
5751 sizeof(struct trace_iterator
) -
5752 offsetof(struct trace_iterator
, seq
));
5753 cpumask_clear(iter
->started
);
5756 trace_event_read_lock();
5757 trace_access_lock(iter
->cpu_file
);
5758 while (trace_find_next_entry_inc(iter
) != NULL
) {
5759 enum print_line_t ret
;
5760 int save_len
= iter
->seq
.seq
.len
;
5762 ret
= print_trace_line(iter
);
5763 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
5764 /* don't print partial lines */
5765 iter
->seq
.seq
.len
= save_len
;
5768 if (ret
!= TRACE_TYPE_NO_CONSUME
)
5769 trace_consume(iter
);
5771 if (trace_seq_used(&iter
->seq
) >= cnt
)
5775 * Setting the full flag means we reached the trace_seq buffer
5776 * size and we should leave by partial output condition above.
5777 * One of the trace_seq_* functions is not used properly.
5779 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
5782 trace_access_unlock(iter
->cpu_file
);
5783 trace_event_read_unlock();
5785 /* Now copy what we have to the user */
5786 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
5787 if (iter
->seq
.seq
.readpos
>= trace_seq_used(&iter
->seq
))
5788 trace_seq_init(&iter
->seq
);
5791 * If there was nothing to send to user, in spite of consuming trace
5792 * entries, go back to wait for more entries.
5798 mutex_unlock(&iter
->mutex
);
5803 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
5806 __free_page(spd
->pages
[idx
]);
5809 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
5811 .confirm
= generic_pipe_buf_confirm
,
5812 .release
= generic_pipe_buf_release
,
5813 .steal
= generic_pipe_buf_steal
,
5814 .get
= generic_pipe_buf_get
,
5818 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
5824 /* Seq buffer is page-sized, exactly what we need. */
5826 save_len
= iter
->seq
.seq
.len
;
5827 ret
= print_trace_line(iter
);
5829 if (trace_seq_has_overflowed(&iter
->seq
)) {
5830 iter
->seq
.seq
.len
= save_len
;
5835 * This should not be hit, because it should only
5836 * be set if the iter->seq overflowed. But check it
5837 * anyway to be safe.
5839 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
5840 iter
->seq
.seq
.len
= save_len
;
5844 count
= trace_seq_used(&iter
->seq
) - save_len
;
5847 iter
->seq
.seq
.len
= save_len
;
5851 if (ret
!= TRACE_TYPE_NO_CONSUME
)
5852 trace_consume(iter
);
5854 if (!trace_find_next_entry_inc(iter
)) {
5864 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
5866 struct pipe_inode_info
*pipe
,
5870 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
5871 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
5872 struct trace_iterator
*iter
= filp
->private_data
;
5873 struct splice_pipe_desc spd
= {
5875 .partial
= partial_def
,
5876 .nr_pages
= 0, /* This gets updated below. */
5877 .nr_pages_max
= PIPE_DEF_BUFFERS
,
5878 .ops
= &tracing_pipe_buf_ops
,
5879 .spd_release
= tracing_spd_release_pipe
,
5885 if (splice_grow_spd(pipe
, &spd
))
5888 mutex_lock(&iter
->mutex
);
5890 if (iter
->trace
->splice_read
) {
5891 ret
= iter
->trace
->splice_read(iter
, filp
,
5892 ppos
, pipe
, len
, flags
);
5897 ret
= tracing_wait_pipe(filp
);
5901 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
5906 trace_event_read_lock();
5907 trace_access_lock(iter
->cpu_file
);
5909 /* Fill as many pages as possible. */
5910 for (i
= 0, rem
= len
; i
< spd
.nr_pages_max
&& rem
; i
++) {
5911 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
5915 rem
= tracing_fill_pipe_page(rem
, iter
);
5917 /* Copy the data into the page, so we can start over. */
5918 ret
= trace_seq_to_buffer(&iter
->seq
,
5919 page_address(spd
.pages
[i
]),
5920 trace_seq_used(&iter
->seq
));
5922 __free_page(spd
.pages
[i
]);
5925 spd
.partial
[i
].offset
= 0;
5926 spd
.partial
[i
].len
= trace_seq_used(&iter
->seq
);
5928 trace_seq_init(&iter
->seq
);
5931 trace_access_unlock(iter
->cpu_file
);
5932 trace_event_read_unlock();
5933 mutex_unlock(&iter
->mutex
);
5938 ret
= splice_to_pipe(pipe
, &spd
);
5942 splice_shrink_spd(&spd
);
5946 mutex_unlock(&iter
->mutex
);
5951 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
5952 size_t cnt
, loff_t
*ppos
)
5954 struct inode
*inode
= file_inode(filp
);
5955 struct trace_array
*tr
= inode
->i_private
;
5956 int cpu
= tracing_get_cpu(inode
);
5961 mutex_lock(&trace_types_lock
);
5963 if (cpu
== RING_BUFFER_ALL_CPUS
) {
5964 int cpu
, buf_size_same
;
5969 /* check if all cpu sizes are same */
5970 for_each_tracing_cpu(cpu
) {
5971 /* fill in the size from first enabled cpu */
5973 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
5974 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
5980 if (buf_size_same
) {
5981 if (!ring_buffer_expanded
)
5982 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
5984 trace_buf_size
>> 10);
5986 r
= sprintf(buf
, "%lu\n", size
>> 10);
5988 r
= sprintf(buf
, "X\n");
5990 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
5992 mutex_unlock(&trace_types_lock
);
5994 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5999 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
6000 size_t cnt
, loff_t
*ppos
)
6002 struct inode
*inode
= file_inode(filp
);
6003 struct trace_array
*tr
= inode
->i_private
;
6007 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6011 /* must have at least 1 entry */
6015 /* value is in KB */
6017 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
6027 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
6028 size_t cnt
, loff_t
*ppos
)
6030 struct trace_array
*tr
= filp
->private_data
;
6033 unsigned long size
= 0, expanded_size
= 0;
6035 mutex_lock(&trace_types_lock
);
6036 for_each_tracing_cpu(cpu
) {
6037 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
6038 if (!ring_buffer_expanded
)
6039 expanded_size
+= trace_buf_size
>> 10;
6041 if (ring_buffer_expanded
)
6042 r
= sprintf(buf
, "%lu\n", size
);
6044 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
6045 mutex_unlock(&trace_types_lock
);
6047 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6051 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
6052 size_t cnt
, loff_t
*ppos
)
6055 * There is no need to read what the user has written, this function
6056 * is just to make sure that there is no error when "echo" is used
6065 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
6067 struct trace_array
*tr
= inode
->i_private
;
6069 /* disable tracing ? */
6070 if (tr
->trace_flags
& TRACE_ITER_STOP_ON_FREE
)
6071 tracer_tracing_off(tr
);
6072 /* resize the ring buffer to 0 */
6073 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
6075 trace_array_put(tr
);
6081 tracing_mark_write(struct file
*filp
, const char __user
*ubuf
,
6082 size_t cnt
, loff_t
*fpos
)
6084 struct trace_array
*tr
= filp
->private_data
;
6085 struct ring_buffer_event
*event
;
6086 struct ring_buffer
*buffer
;
6087 struct print_entry
*entry
;
6088 unsigned long irq_flags
;
6089 const char faulted
[] = "<faulted>";
6094 /* Used in tracing_mark_raw_write() as well */
6095 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6097 if (tracing_disabled
)
6100 if (!(tr
->trace_flags
& TRACE_ITER_MARKERS
))
6103 if (cnt
> TRACE_BUF_SIZE
)
6104 cnt
= TRACE_BUF_SIZE
;
6106 BUILD_BUG_ON(TRACE_BUF_SIZE
>= PAGE_SIZE
);
6108 local_save_flags(irq_flags
);
6109 size
= sizeof(*entry
) + cnt
+ 2; /* add '\0' and possible '\n' */
6111 /* If less than "<faulted>", then make sure we can still add that */
6112 if (cnt
< FAULTED_SIZE
)
6113 size
+= FAULTED_SIZE
- cnt
;
6115 buffer
= tr
->trace_buffer
.buffer
;
6116 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
6117 irq_flags
, preempt_count());
6118 if (unlikely(!event
))
6119 /* Ring buffer disabled, return as if not open for write */
6122 entry
= ring_buffer_event_data(event
);
6123 entry
->ip
= _THIS_IP_
;
6125 len
= __copy_from_user_inatomic(&entry
->buf
, ubuf
, cnt
);
6127 memcpy(&entry
->buf
, faulted
, FAULTED_SIZE
);
6134 if (entry
->buf
[cnt
- 1] != '\n') {
6135 entry
->buf
[cnt
] = '\n';
6136 entry
->buf
[cnt
+ 1] = '\0';
6138 entry
->buf
[cnt
] = '\0';
6140 __buffer_unlock_commit(buffer
, event
);
6148 /* Limit it for now to 3K (including tag) */
6149 #define RAW_DATA_MAX_SIZE (1024*3)
6152 tracing_mark_raw_write(struct file
*filp
, const char __user
*ubuf
,
6153 size_t cnt
, loff_t
*fpos
)
6155 struct trace_array
*tr
= filp
->private_data
;
6156 struct ring_buffer_event
*event
;
6157 struct ring_buffer
*buffer
;
6158 struct raw_data_entry
*entry
;
6159 const char faulted
[] = "<faulted>";
6160 unsigned long irq_flags
;
6165 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6167 if (tracing_disabled
)
6170 if (!(tr
->trace_flags
& TRACE_ITER_MARKERS
))
6173 /* The marker must at least have a tag id */
6174 if (cnt
< sizeof(unsigned int) || cnt
> RAW_DATA_MAX_SIZE
)
6177 if (cnt
> TRACE_BUF_SIZE
)
6178 cnt
= TRACE_BUF_SIZE
;
6180 BUILD_BUG_ON(TRACE_BUF_SIZE
>= PAGE_SIZE
);
6182 local_save_flags(irq_flags
);
6183 size
= sizeof(*entry
) + cnt
;
6184 if (cnt
< FAULT_SIZE_ID
)
6185 size
+= FAULT_SIZE_ID
- cnt
;
6187 buffer
= tr
->trace_buffer
.buffer
;
6188 event
= __trace_buffer_lock_reserve(buffer
, TRACE_RAW_DATA
, size
,
6189 irq_flags
, preempt_count());
6191 /* Ring buffer disabled, return as if not open for write */
6194 entry
= ring_buffer_event_data(event
);
6196 len
= __copy_from_user_inatomic(&entry
->id
, ubuf
, cnt
);
6199 memcpy(&entry
->buf
, faulted
, FAULTED_SIZE
);
6204 __buffer_unlock_commit(buffer
, event
);
6212 static int tracing_clock_show(struct seq_file
*m
, void *v
)
6214 struct trace_array
*tr
= m
->private;
6217 for (i
= 0; i
< ARRAY_SIZE(trace_clocks
); i
++)
6219 "%s%s%s%s", i
? " " : "",
6220 i
== tr
->clock_id
? "[" : "", trace_clocks
[i
].name
,
6221 i
== tr
->clock_id
? "]" : "");
6227 static int tracing_set_clock(struct trace_array
*tr
, const char *clockstr
)
6231 for (i
= 0; i
< ARRAY_SIZE(trace_clocks
); i
++) {
6232 if (strcmp(trace_clocks
[i
].name
, clockstr
) == 0)
6235 if (i
== ARRAY_SIZE(trace_clocks
))
6238 mutex_lock(&trace_types_lock
);
6242 ring_buffer_set_clock(tr
->trace_buffer
.buffer
, trace_clocks
[i
].func
);
6245 * New clock may not be consistent with the previous clock.
6246 * Reset the buffer so that it doesn't have incomparable timestamps.
6248 tracing_reset_online_cpus(&tr
->trace_buffer
);
6250 #ifdef CONFIG_TRACER_MAX_TRACE
6251 if (tr
->max_buffer
.buffer
)
6252 ring_buffer_set_clock(tr
->max_buffer
.buffer
, trace_clocks
[i
].func
);
6253 tracing_reset_online_cpus(&tr
->max_buffer
);
6256 mutex_unlock(&trace_types_lock
);
6261 static ssize_t
tracing_clock_write(struct file
*filp
, const char __user
*ubuf
,
6262 size_t cnt
, loff_t
*fpos
)
6264 struct seq_file
*m
= filp
->private_data
;
6265 struct trace_array
*tr
= m
->private;
6267 const char *clockstr
;
6270 if (cnt
>= sizeof(buf
))
6273 if (copy_from_user(buf
, ubuf
, cnt
))
6278 clockstr
= strstrip(buf
);
6280 ret
= tracing_set_clock(tr
, clockstr
);
6289 static int tracing_clock_open(struct inode
*inode
, struct file
*file
)
6291 struct trace_array
*tr
= inode
->i_private
;
6294 if (tracing_disabled
)
6297 if (trace_array_get(tr
))
6300 ret
= single_open(file
, tracing_clock_show
, inode
->i_private
);
6302 trace_array_put(tr
);
6307 struct ftrace_buffer_info
{
6308 struct trace_iterator iter
;
6310 unsigned int spare_cpu
;
6314 #ifdef CONFIG_TRACER_SNAPSHOT
6315 static int tracing_snapshot_open(struct inode
*inode
, struct file
*file
)
6317 struct trace_array
*tr
= inode
->i_private
;
6318 struct trace_iterator
*iter
;
6322 if (trace_array_get(tr
) < 0)
6325 if (file
->f_mode
& FMODE_READ
) {
6326 iter
= __tracing_open(inode
, file
, true);
6328 ret
= PTR_ERR(iter
);
6330 /* Writes still need the seq_file to hold the private data */
6332 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
6335 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
6343 iter
->trace_buffer
= &tr
->max_buffer
;
6344 iter
->cpu_file
= tracing_get_cpu(inode
);
6346 file
->private_data
= m
;
6350 trace_array_put(tr
);
6356 tracing_snapshot_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
6359 struct seq_file
*m
= filp
->private_data
;
6360 struct trace_iterator
*iter
= m
->private;
6361 struct trace_array
*tr
= iter
->tr
;
6365 ret
= tracing_update_buffers();
6369 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6373 mutex_lock(&trace_types_lock
);
6375 if (tr
->current_trace
->use_max_tr
) {
6382 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
6386 if (tr
->allocated_snapshot
)
6390 /* Only allow per-cpu swap if the ring buffer supports it */
6391 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6392 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
6397 if (!tr
->allocated_snapshot
) {
6398 ret
= tracing_alloc_snapshot_instance(tr
);
6402 local_irq_disable();
6403 /* Now, we're going to swap */
6404 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
6405 update_max_tr(tr
, current
, smp_processor_id());
6407 update_max_tr_single(tr
, current
, iter
->cpu_file
);
6411 if (tr
->allocated_snapshot
) {
6412 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
6413 tracing_reset_online_cpus(&tr
->max_buffer
);
6415 tracing_reset(&tr
->max_buffer
, iter
->cpu_file
);
6425 mutex_unlock(&trace_types_lock
);
6429 static int tracing_snapshot_release(struct inode
*inode
, struct file
*file
)
6431 struct seq_file
*m
= file
->private_data
;
6434 ret
= tracing_release(inode
, file
);
6436 if (file
->f_mode
& FMODE_READ
)
6439 /* If write only, the seq_file is just a stub */
6447 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
);
6448 static ssize_t
tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
6449 size_t count
, loff_t
*ppos
);
6450 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
);
6451 static ssize_t
tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
6452 struct pipe_inode_info
*pipe
, size_t len
, unsigned int flags
);
6454 static int snapshot_raw_open(struct inode
*inode
, struct file
*filp
)
6456 struct ftrace_buffer_info
*info
;
6459 ret
= tracing_buffers_open(inode
, filp
);
6463 info
= filp
->private_data
;
6465 if (info
->iter
.trace
->use_max_tr
) {
6466 tracing_buffers_release(inode
, filp
);
6470 info
->iter
.snapshot
= true;
6471 info
->iter
.trace_buffer
= &info
->iter
.tr
->max_buffer
;
6476 #endif /* CONFIG_TRACER_SNAPSHOT */
6479 static const struct file_operations tracing_thresh_fops
= {
6480 .open
= tracing_open_generic
,
6481 .read
= tracing_thresh_read
,
6482 .write
= tracing_thresh_write
,
6483 .llseek
= generic_file_llseek
,
6486 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6487 static const struct file_operations tracing_max_lat_fops
= {
6488 .open
= tracing_open_generic
,
6489 .read
= tracing_max_lat_read
,
6490 .write
= tracing_max_lat_write
,
6491 .llseek
= generic_file_llseek
,
6495 static const struct file_operations set_tracer_fops
= {
6496 .open
= tracing_open_generic
,
6497 .read
= tracing_set_trace_read
,
6498 .write
= tracing_set_trace_write
,
6499 .llseek
= generic_file_llseek
,
6502 static const struct file_operations tracing_pipe_fops
= {
6503 .open
= tracing_open_pipe
,
6504 .poll
= tracing_poll_pipe
,
6505 .read
= tracing_read_pipe
,
6506 .splice_read
= tracing_splice_read_pipe
,
6507 .release
= tracing_release_pipe
,
6508 .llseek
= no_llseek
,
6511 static const struct file_operations tracing_entries_fops
= {
6512 .open
= tracing_open_generic_tr
,
6513 .read
= tracing_entries_read
,
6514 .write
= tracing_entries_write
,
6515 .llseek
= generic_file_llseek
,
6516 .release
= tracing_release_generic_tr
,
6519 static const struct file_operations tracing_total_entries_fops
= {
6520 .open
= tracing_open_generic_tr
,
6521 .read
= tracing_total_entries_read
,
6522 .llseek
= generic_file_llseek
,
6523 .release
= tracing_release_generic_tr
,
6526 static const struct file_operations tracing_free_buffer_fops
= {
6527 .open
= tracing_open_generic_tr
,
6528 .write
= tracing_free_buffer_write
,
6529 .release
= tracing_free_buffer_release
,
6532 static const struct file_operations tracing_mark_fops
= {
6533 .open
= tracing_open_generic_tr
,
6534 .write
= tracing_mark_write
,
6535 .llseek
= generic_file_llseek
,
6536 .release
= tracing_release_generic_tr
,
6539 static const struct file_operations tracing_mark_raw_fops
= {
6540 .open
= tracing_open_generic_tr
,
6541 .write
= tracing_mark_raw_write
,
6542 .llseek
= generic_file_llseek
,
6543 .release
= tracing_release_generic_tr
,
6546 static const struct file_operations trace_clock_fops
= {
6547 .open
= tracing_clock_open
,
6549 .llseek
= seq_lseek
,
6550 .release
= tracing_single_release_tr
,
6551 .write
= tracing_clock_write
,
6554 #ifdef CONFIG_TRACER_SNAPSHOT
6555 static const struct file_operations snapshot_fops
= {
6556 .open
= tracing_snapshot_open
,
6558 .write
= tracing_snapshot_write
,
6559 .llseek
= tracing_lseek
,
6560 .release
= tracing_snapshot_release
,
6563 static const struct file_operations snapshot_raw_fops
= {
6564 .open
= snapshot_raw_open
,
6565 .read
= tracing_buffers_read
,
6566 .release
= tracing_buffers_release
,
6567 .splice_read
= tracing_buffers_splice_read
,
6568 .llseek
= no_llseek
,
6571 #endif /* CONFIG_TRACER_SNAPSHOT */
6573 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
)
6575 struct trace_array
*tr
= inode
->i_private
;
6576 struct ftrace_buffer_info
*info
;
6579 if (tracing_disabled
)
6582 if (trace_array_get(tr
) < 0)
6585 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
6587 trace_array_put(tr
);
6591 mutex_lock(&trace_types_lock
);
6594 info
->iter
.cpu_file
= tracing_get_cpu(inode
);
6595 info
->iter
.trace
= tr
->current_trace
;
6596 info
->iter
.trace_buffer
= &tr
->trace_buffer
;
6598 /* Force reading ring buffer for first read */
6599 info
->read
= (unsigned int)-1;
6601 filp
->private_data
= info
;
6603 tr
->current_trace
->ref
++;
6605 mutex_unlock(&trace_types_lock
);
6607 ret
= nonseekable_open(inode
, filp
);
6609 trace_array_put(tr
);
6615 tracing_buffers_poll(struct file
*filp
, poll_table
*poll_table
)
6617 struct ftrace_buffer_info
*info
= filp
->private_data
;
6618 struct trace_iterator
*iter
= &info
->iter
;
6620 return trace_poll(iter
, filp
, poll_table
);
6624 tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
6625 size_t count
, loff_t
*ppos
)
6627 struct ftrace_buffer_info
*info
= filp
->private_data
;
6628 struct trace_iterator
*iter
= &info
->iter
;
6635 #ifdef CONFIG_TRACER_MAX_TRACE
6636 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6641 info
->spare
= ring_buffer_alloc_read_page(iter
->trace_buffer
->buffer
,
6643 if (IS_ERR(info
->spare
)) {
6644 ret
= PTR_ERR(info
->spare
);
6647 info
->spare_cpu
= iter
->cpu_file
;
6653 /* Do we have previous read data to read? */
6654 if (info
->read
< PAGE_SIZE
)
6658 trace_access_lock(iter
->cpu_file
);
6659 ret
= ring_buffer_read_page(iter
->trace_buffer
->buffer
,
6663 trace_access_unlock(iter
->cpu_file
);
6666 if (trace_empty(iter
)) {
6667 if ((filp
->f_flags
& O_NONBLOCK
))
6670 ret
= wait_on_pipe(iter
, false);
6681 size
= PAGE_SIZE
- info
->read
;
6685 ret
= copy_to_user(ubuf
, info
->spare
+ info
->read
, size
);
6697 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
)
6699 struct ftrace_buffer_info
*info
= file
->private_data
;
6700 struct trace_iterator
*iter
= &info
->iter
;
6702 mutex_lock(&trace_types_lock
);
6704 iter
->tr
->current_trace
->ref
--;
6706 __trace_array_put(iter
->tr
);
6709 ring_buffer_free_read_page(iter
->trace_buffer
->buffer
,
6710 info
->spare_cpu
, info
->spare
);
6713 mutex_unlock(&trace_types_lock
);
6719 struct ring_buffer
*buffer
;
6722 refcount_t refcount
;
6725 static void buffer_ref_release(struct buffer_ref
*ref
)
6727 if (!refcount_dec_and_test(&ref
->refcount
))
6729 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
, ref
->page
);
6733 static void buffer_pipe_buf_release(struct pipe_inode_info
*pipe
,
6734 struct pipe_buffer
*buf
)
6736 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
6738 buffer_ref_release(ref
);
6742 static bool buffer_pipe_buf_get(struct pipe_inode_info
*pipe
,
6743 struct pipe_buffer
*buf
)
6745 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
6747 if (refcount_read(&ref
->refcount
) > INT_MAX
/2)
6750 refcount_inc(&ref
->refcount
);
6754 /* Pipe buffer operations for a buffer. */
6755 static const struct pipe_buf_operations buffer_pipe_buf_ops
= {
6757 .confirm
= generic_pipe_buf_confirm
,
6758 .release
= buffer_pipe_buf_release
,
6759 .steal
= generic_pipe_buf_nosteal
,
6760 .get
= buffer_pipe_buf_get
,
6764 * Callback from splice_to_pipe(), if we need to release some pages
6765 * at the end of the spd in case we error'ed out in filling the pipe.
6767 static void buffer_spd_release(struct splice_pipe_desc
*spd
, unsigned int i
)
6769 struct buffer_ref
*ref
=
6770 (struct buffer_ref
*)spd
->partial
[i
].private;
6772 buffer_ref_release(ref
);
6773 spd
->partial
[i
].private = 0;
6777 tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
6778 struct pipe_inode_info
*pipe
, size_t len
,
6781 struct ftrace_buffer_info
*info
= file
->private_data
;
6782 struct trace_iterator
*iter
= &info
->iter
;
6783 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
6784 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
6785 struct splice_pipe_desc spd
= {
6787 .partial
= partial_def
,
6788 .nr_pages_max
= PIPE_DEF_BUFFERS
,
6789 .ops
= &buffer_pipe_buf_ops
,
6790 .spd_release
= buffer_spd_release
,
6792 struct buffer_ref
*ref
;
6796 #ifdef CONFIG_TRACER_MAX_TRACE
6797 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6801 if (*ppos
& (PAGE_SIZE
- 1))
6804 if (len
& (PAGE_SIZE
- 1)) {
6805 if (len
< PAGE_SIZE
)
6810 if (splice_grow_spd(pipe
, &spd
))
6814 trace_access_lock(iter
->cpu_file
);
6815 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
6817 for (i
= 0; i
< spd
.nr_pages_max
&& len
&& entries
; i
++, len
-= PAGE_SIZE
) {
6821 ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
6827 refcount_set(&ref
->refcount
, 1);
6828 ref
->buffer
= iter
->trace_buffer
->buffer
;
6829 ref
->page
= ring_buffer_alloc_read_page(ref
->buffer
, iter
->cpu_file
);
6830 if (IS_ERR(ref
->page
)) {
6831 ret
= PTR_ERR(ref
->page
);
6836 ref
->cpu
= iter
->cpu_file
;
6838 r
= ring_buffer_read_page(ref
->buffer
, &ref
->page
,
6839 len
, iter
->cpu_file
, 1);
6841 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
,
6847 page
= virt_to_page(ref
->page
);
6849 spd
.pages
[i
] = page
;
6850 spd
.partial
[i
].len
= PAGE_SIZE
;
6851 spd
.partial
[i
].offset
= 0;
6852 spd
.partial
[i
].private = (unsigned long)ref
;
6856 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
6859 trace_access_unlock(iter
->cpu_file
);
6862 /* did we read anything? */
6863 if (!spd
.nr_pages
) {
6868 if ((file
->f_flags
& O_NONBLOCK
) || (flags
& SPLICE_F_NONBLOCK
))
6871 ret
= wait_on_pipe(iter
, true);
6878 ret
= splice_to_pipe(pipe
, &spd
);
6880 splice_shrink_spd(&spd
);
6885 static const struct file_operations tracing_buffers_fops
= {
6886 .open
= tracing_buffers_open
,
6887 .read
= tracing_buffers_read
,
6888 .poll
= tracing_buffers_poll
,
6889 .release
= tracing_buffers_release
,
6890 .splice_read
= tracing_buffers_splice_read
,
6891 .llseek
= no_llseek
,
6895 tracing_stats_read(struct file
*filp
, char __user
*ubuf
,
6896 size_t count
, loff_t
*ppos
)
6898 struct inode
*inode
= file_inode(filp
);
6899 struct trace_array
*tr
= inode
->i_private
;
6900 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
6901 int cpu
= tracing_get_cpu(inode
);
6902 struct trace_seq
*s
;
6904 unsigned long long t
;
6905 unsigned long usec_rem
;
6907 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
6913 cnt
= ring_buffer_entries_cpu(trace_buf
->buffer
, cpu
);
6914 trace_seq_printf(s
, "entries: %ld\n", cnt
);
6916 cnt
= ring_buffer_overrun_cpu(trace_buf
->buffer
, cpu
);
6917 trace_seq_printf(s
, "overrun: %ld\n", cnt
);
6919 cnt
= ring_buffer_commit_overrun_cpu(trace_buf
->buffer
, cpu
);
6920 trace_seq_printf(s
, "commit overrun: %ld\n", cnt
);
6922 cnt
= ring_buffer_bytes_cpu(trace_buf
->buffer
, cpu
);
6923 trace_seq_printf(s
, "bytes: %ld\n", cnt
);
6925 if (trace_clocks
[tr
->clock_id
].in_ns
) {
6926 /* local or global for trace_clock */
6927 t
= ns2usecs(ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
6928 usec_rem
= do_div(t
, USEC_PER_SEC
);
6929 trace_seq_printf(s
, "oldest event ts: %5llu.%06lu\n",
6932 t
= ns2usecs(ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
6933 usec_rem
= do_div(t
, USEC_PER_SEC
);
6934 trace_seq_printf(s
, "now ts: %5llu.%06lu\n", t
, usec_rem
);
6936 /* counter or tsc mode for trace_clock */
6937 trace_seq_printf(s
, "oldest event ts: %llu\n",
6938 ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
6940 trace_seq_printf(s
, "now ts: %llu\n",
6941 ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
6944 cnt
= ring_buffer_dropped_events_cpu(trace_buf
->buffer
, cpu
);
6945 trace_seq_printf(s
, "dropped events: %ld\n", cnt
);
6947 cnt
= ring_buffer_read_events_cpu(trace_buf
->buffer
, cpu
);
6948 trace_seq_printf(s
, "read events: %ld\n", cnt
);
6950 count
= simple_read_from_buffer(ubuf
, count
, ppos
,
6951 s
->buffer
, trace_seq_used(s
));
6958 static const struct file_operations tracing_stats_fops
= {
6959 .open
= tracing_open_generic_tr
,
6960 .read
= tracing_stats_read
,
6961 .llseek
= generic_file_llseek
,
6962 .release
= tracing_release_generic_tr
,
6965 #ifdef CONFIG_DYNAMIC_FTRACE
6968 tracing_read_dyn_info(struct file
*filp
, char __user
*ubuf
,
6969 size_t cnt
, loff_t
*ppos
)
6971 unsigned long *p
= filp
->private_data
;
6972 char buf
[64]; /* Not too big for a shallow stack */
6975 r
= scnprintf(buf
, 63, "%ld", *p
);
6978 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6981 static const struct file_operations tracing_dyn_info_fops
= {
6982 .open
= tracing_open_generic
,
6983 .read
= tracing_read_dyn_info
,
6984 .llseek
= generic_file_llseek
,
6986 #endif /* CONFIG_DYNAMIC_FTRACE */
6988 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6990 ftrace_snapshot(unsigned long ip
, unsigned long parent_ip
,
6991 struct trace_array
*tr
, struct ftrace_probe_ops
*ops
,
6994 tracing_snapshot_instance(tr
);
6998 ftrace_count_snapshot(unsigned long ip
, unsigned long parent_ip
,
6999 struct trace_array
*tr
, struct ftrace_probe_ops
*ops
,
7002 struct ftrace_func_mapper
*mapper
= data
;
7006 count
= (long *)ftrace_func_mapper_find_ip(mapper
, ip
);
7016 tracing_snapshot_instance(tr
);
7020 ftrace_snapshot_print(struct seq_file
*m
, unsigned long ip
,
7021 struct ftrace_probe_ops
*ops
, void *data
)
7023 struct ftrace_func_mapper
*mapper
= data
;
7026 seq_printf(m
, "%ps:", (void *)ip
);
7028 seq_puts(m
, "snapshot");
7031 count
= (long *)ftrace_func_mapper_find_ip(mapper
, ip
);
7034 seq_printf(m
, ":count=%ld\n", *count
);
7036 seq_puts(m
, ":unlimited\n");
7042 ftrace_snapshot_init(struct ftrace_probe_ops
*ops
, struct trace_array
*tr
,
7043 unsigned long ip
, void *init_data
, void **data
)
7045 struct ftrace_func_mapper
*mapper
= *data
;
7048 mapper
= allocate_ftrace_func_mapper();
7054 return ftrace_func_mapper_add_ip(mapper
, ip
, init_data
);
7058 ftrace_snapshot_free(struct ftrace_probe_ops
*ops
, struct trace_array
*tr
,
7059 unsigned long ip
, void *data
)
7061 struct ftrace_func_mapper
*mapper
= data
;
7066 free_ftrace_func_mapper(mapper
, NULL
);
7070 ftrace_func_mapper_remove_ip(mapper
, ip
);
7073 static struct ftrace_probe_ops snapshot_probe_ops
= {
7074 .func
= ftrace_snapshot
,
7075 .print
= ftrace_snapshot_print
,
7078 static struct ftrace_probe_ops snapshot_count_probe_ops
= {
7079 .func
= ftrace_count_snapshot
,
7080 .print
= ftrace_snapshot_print
,
7081 .init
= ftrace_snapshot_init
,
7082 .free
= ftrace_snapshot_free
,
7086 ftrace_trace_snapshot_callback(struct trace_array
*tr
, struct ftrace_hash
*hash
,
7087 char *glob
, char *cmd
, char *param
, int enable
)
7089 struct ftrace_probe_ops
*ops
;
7090 void *count
= (void *)-1;
7097 /* hash funcs only work with set_ftrace_filter */
7101 ops
= param
? &snapshot_count_probe_ops
: &snapshot_probe_ops
;
7104 return unregister_ftrace_function_probe_func(glob
+1, tr
, ops
);
7109 number
= strsep(¶m
, ":");
7111 if (!strlen(number
))
7115 * We use the callback data field (which is a pointer)
7118 ret
= kstrtoul(number
, 0, (unsigned long *)&count
);
7123 ret
= tracing_alloc_snapshot_instance(tr
);
7127 ret
= register_ftrace_function_probe(glob
, tr
, ops
, count
);
7130 return ret
< 0 ? ret
: 0;
7133 static struct ftrace_func_command ftrace_snapshot_cmd
= {
7135 .func
= ftrace_trace_snapshot_callback
,
7138 static __init
int register_snapshot_cmd(void)
7140 return register_ftrace_command(&ftrace_snapshot_cmd
);
7143 static inline __init
int register_snapshot_cmd(void) { return 0; }
7144 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7146 static struct dentry
*tracing_get_dentry(struct trace_array
*tr
)
7148 if (WARN_ON(!tr
->dir
))
7149 return ERR_PTR(-ENODEV
);
7151 /* Top directory uses NULL as the parent */
7152 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
7155 /* All sub buffers have a descriptor */
7159 static struct dentry
*tracing_dentry_percpu(struct trace_array
*tr
, int cpu
)
7161 struct dentry
*d_tracer
;
7164 return tr
->percpu_dir
;
7166 d_tracer
= tracing_get_dentry(tr
);
7167 if (IS_ERR(d_tracer
))
7170 tr
->percpu_dir
= tracefs_create_dir("per_cpu", d_tracer
);
7172 WARN_ONCE(!tr
->percpu_dir
,
7173 "Could not create tracefs directory 'per_cpu/%d'\n", cpu
);
7175 return tr
->percpu_dir
;
7178 static struct dentry
*
7179 trace_create_cpu_file(const char *name
, umode_t mode
, struct dentry
*parent
,
7180 void *data
, long cpu
, const struct file_operations
*fops
)
7182 struct dentry
*ret
= trace_create_file(name
, mode
, parent
, data
, fops
);
7184 if (ret
) /* See tracing_get_cpu() */
7185 d_inode(ret
)->i_cdev
= (void *)(cpu
+ 1);
7190 tracing_init_tracefs_percpu(struct trace_array
*tr
, long cpu
)
7192 struct dentry
*d_percpu
= tracing_dentry_percpu(tr
, cpu
);
7193 struct dentry
*d_cpu
;
7194 char cpu_dir
[30]; /* 30 characters should be more than enough */
7199 snprintf(cpu_dir
, 30, "cpu%ld", cpu
);
7200 d_cpu
= tracefs_create_dir(cpu_dir
, d_percpu
);
7202 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir
);
7206 /* per cpu trace_pipe */
7207 trace_create_cpu_file("trace_pipe", 0444, d_cpu
,
7208 tr
, cpu
, &tracing_pipe_fops
);
7211 trace_create_cpu_file("trace", 0644, d_cpu
,
7212 tr
, cpu
, &tracing_fops
);
7214 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu
,
7215 tr
, cpu
, &tracing_buffers_fops
);
7217 trace_create_cpu_file("stats", 0444, d_cpu
,
7218 tr
, cpu
, &tracing_stats_fops
);
7220 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu
,
7221 tr
, cpu
, &tracing_entries_fops
);
7223 #ifdef CONFIG_TRACER_SNAPSHOT
7224 trace_create_cpu_file("snapshot", 0644, d_cpu
,
7225 tr
, cpu
, &snapshot_fops
);
7227 trace_create_cpu_file("snapshot_raw", 0444, d_cpu
,
7228 tr
, cpu
, &snapshot_raw_fops
);
7232 #ifdef CONFIG_FTRACE_SELFTEST
7233 /* Let selftest have access to static functions in this file */
7234 #include "trace_selftest.c"
7238 trace_options_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
7241 struct trace_option_dentry
*topt
= filp
->private_data
;
7244 if (topt
->flags
->val
& topt
->opt
->bit
)
7249 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
7253 trace_options_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
7256 struct trace_option_dentry
*topt
= filp
->private_data
;
7260 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
7264 if (val
!= 0 && val
!= 1)
7267 if (!!(topt
->flags
->val
& topt
->opt
->bit
) != val
) {
7268 mutex_lock(&trace_types_lock
);
7269 ret
= __set_tracer_option(topt
->tr
, topt
->flags
,
7271 mutex_unlock(&trace_types_lock
);
7282 static const struct file_operations trace_options_fops
= {
7283 .open
= tracing_open_generic
,
7284 .read
= trace_options_read
,
7285 .write
= trace_options_write
,
7286 .llseek
= generic_file_llseek
,
7290 * In order to pass in both the trace_array descriptor as well as the index
7291 * to the flag that the trace option file represents, the trace_array
7292 * has a character array of trace_flags_index[], which holds the index
7293 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7294 * The address of this character array is passed to the flag option file
7295 * read/write callbacks.
7297 * In order to extract both the index and the trace_array descriptor,
7298 * get_tr_index() uses the following algorithm.
7302 * As the pointer itself contains the address of the index (remember
7305 * Then to get the trace_array descriptor, by subtracting that index
7306 * from the ptr, we get to the start of the index itself.
7308 * ptr - idx == &index[0]
7310 * Then a simple container_of() from that pointer gets us to the
7311 * trace_array descriptor.
7313 static void get_tr_index(void *data
, struct trace_array
**ptr
,
7314 unsigned int *pindex
)
7316 *pindex
= *(unsigned char *)data
;
7318 *ptr
= container_of(data
- *pindex
, struct trace_array
,
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
                        loff_t *ppos)
{
        void *tr_index = filp->private_data;
        struct trace_array *tr;
        unsigned int index;
        char *buf;

        get_tr_index(tr_index, &tr, &index);

        if (tr->trace_flags & (1 << index))
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
                         loff_t *ppos)
{
        void *tr_index = filp->private_data;
        struct trace_array *tr;
        unsigned int index;
        unsigned long val;
        int ret;

        get_tr_index(tr_index, &tr, &index);

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        mutex_lock(&trace_types_lock);
        ret = set_tracer_flag(tr, 1 << index, val);
        mutex_unlock(&trace_types_lock);

        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

static const struct file_operations trace_options_core_fops = {
        .open = tracing_open_generic,
        .read = trace_options_core_read,
        .write = trace_options_core_write,
        .llseek = generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
                                 umode_t mode,
                                 struct dentry *parent,
                                 void *data,
                                 const struct file_operations *fops)
{
        struct dentry *ret;

        ret = tracefs_create_file(name, mode, parent, data, fops);
        if (!ret)
                pr_warn("Could not create tracefs '%s' entry\n", name);

        return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
        struct dentry *d_tracer;

        if (tr->options)
                return tr->options;

        d_tracer = tracing_get_dentry(tr);
        if (IS_ERR(d_tracer))
                return NULL;

        tr->options = tracefs_create_dir("options", d_tracer);
        if (!tr->options) {
                pr_warn("Could not create tracefs directory 'options'\n");
                return NULL;
        }

        return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
                         struct trace_option_dentry *topt,
                         struct tracer_flags *flags,
                         struct tracer_opt *opt)
{
        struct dentry *t_options;

        t_options = trace_options_init_dentry(tr);
        if (!t_options)
                return;

        topt->flags = flags;
        topt->opt = opt;
        topt->tr = tr;

        topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
                                        &trace_options_fops);

}
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
        struct trace_option_dentry *topts;
        struct trace_options *tr_topts;
        struct tracer_flags *flags;
        struct tracer_opt *opts;
        int cnt;
        int i;

        if (!tracer)
                return;

        flags = tracer->flags;

        if (!flags || !flags->opts)
                return;

        /*
         * If this is an instance, only create flags for tracers
         * the instance may have.
         */
        if (!trace_ok_for_array(tracer, tr))
                return;

        for (i = 0; i < tr->nr_topts; i++) {
                /* Make sure there's no duplicate flags. */
                if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
                        return;
        }

        opts = flags->opts;

        for (cnt = 0; opts[cnt].name; cnt++)
                ;

        topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
        if (!topts)
                return;

        tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
                            GFP_KERNEL);
        if (!tr_topts) {
                kfree(topts);
                return;
        }

        tr->topts = tr_topts;
        tr->topts[tr->nr_topts].tracer = tracer;
        tr->topts[tr->nr_topts].topts = topts;
        tr->nr_topts++;

        for (cnt = 0; opts[cnt].name; cnt++) {
                create_trace_option_file(tr, &topts[cnt], flags,
                                         &opts[cnt]);
                WARN_ONCE(topts[cnt].entry == NULL,
                          "Failed to create trace option: %s",
                          opts[cnt].name);
        }
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
                              const char *option, long index)
{
        struct dentry *t_options;

        t_options = trace_options_init_dentry(tr);
        if (!t_options)
                return NULL;

        return trace_create_file(option, 0644, t_options,
                                 (void *)&tr->trace_flags_index[index],
                                 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
        struct dentry *t_options;
        bool top_level = tr == &global_trace;
        int i;

        t_options = trace_options_init_dentry(tr);
        if (!t_options)
                return;

        for (i = 0; trace_options[i]; i++) {
                if (top_level ||
                    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
                        create_trace_option_core_file(tr, trace_options[i], i);
        }
}
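/*
 * Editor's note: the net effect of the helpers above is one file per trace
 * flag under an instance's "options" directory, each reading back "0" or
 * "1". Illustration only; the path assumes tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *   # cat /sys/kernel/tracing/options/sym-offset
 *   0
 *   # echo 1 > /sys/kernel/tracing/options/sym-offset
 *
 * The write lands in trace_options_core_write(), which flips the matching
 * bit through set_tracer_flag().
 */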
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[64];
        int r;

        r = tracer_tracing_is_on(tr);
        r = sprintf(buf, "%d\n", r);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        if (buffer) {
                mutex_lock(&trace_types_lock);
                if (!!val == tracer_tracing_is_on(tr)) {
                        val = 0; /* do nothing */
                } else if (val) {
                        tracer_tracing_on(tr);
                        if (tr->current_trace->start)
                                tr->current_trace->start(tr);
                } else {
                        tracer_tracing_off(tr);
                        if (tr->current_trace->stop)
                                tr->current_trace->stop(tr);
                }
                mutex_unlock(&trace_types_lock);
        }

        (*ppos)++;

        return cnt;
}

static const struct file_operations rb_simple_fops = {
        .open = tracing_open_generic_tr,
        .read = rb_simple_read,
        .write = rb_simple_write,
        .release = tracing_release_generic_tr,
        .llseek = default_llseek,
};
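/*
 * Editor's note: rb_simple_read()/rb_simple_write() back each instance's
 * "tracing_on" file (created in init_tracer_tracefs() below). Illustration
 * only; the path assumes tracefs is mounted at /sys/kernel/tracing:
 *
 *   # cat /sys/kernel/tracing/tracing_on
 *   1
 *   # echo 0 > /sys/kernel/tracing/tracing_on   (calls tracer_tracing_off()
 *                                                and the tracer's ->stop())
 *
 * Writing the state the buffer is already in is treated as a no-op above.
 */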
struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
        enum ring_buffer_flags rb_flags;

        rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

        buf->tr = tr;

        buf->buffer = ring_buffer_alloc(size, rb_flags);
        if (!buf->buffer)
                return -ENOMEM;

        buf->data = alloc_percpu(struct trace_array_cpu);
        if (!buf->data) {
                ring_buffer_free(buf->buffer);
                buf->buffer = NULL;
                return -ENOMEM;
        }

        /* Allocate the first page for all buffers */
        set_buffer_entries(&tr->trace_buffer,
                           ring_buffer_size(tr->trace_buffer.buffer, 0));

        return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
        int ret;

        ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
        if (ret)
                return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
        ret = allocate_trace_buffer(tr, &tr->max_buffer,
                                    allocate_snapshot ? size : 1);
        if (WARN_ON(ret)) {
                ring_buffer_free(tr->trace_buffer.buffer);
                tr->trace_buffer.buffer = NULL;
                free_percpu(tr->trace_buffer.data);
                tr->trace_buffer.data = NULL;
                return -ENOMEM;
        }
        tr->allocated_snapshot = allocate_snapshot;

        /*
         * Only the top level trace array gets its snapshot allocated
         * from the kernel command line.
         */
        allocate_snapshot = false;
#endif
        return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
        if (buf->buffer) {
                ring_buffer_free(buf->buffer);
                buf->buffer = NULL;
                free_percpu(buf->data);
                buf->data = NULL;
        }
}

static void free_trace_buffers(struct trace_array *tr)
{
        if (!tr)
                return;

        free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
        int i;

        /* Used by the trace options files */
        for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
                tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
        struct tracer *t;

        for (t = trace_types; t; t = t->next)
                add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
        mutex_lock(&trace_types_lock);
        __update_tracer_options(tr);
        mutex_unlock(&trace_types_lock);
}
static int instance_mkdir(const char *name)
{
        struct trace_array *tr;
        int ret;

        mutex_lock(&trace_types_lock);

        ret = -EEXIST;
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr->name && strcmp(tr->name, name) == 0)
                        goto out_unlock;
        }

        ret = -ENOMEM;
        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
        if (!tr)
                goto out_unlock;

        tr->name = kstrdup(name, GFP_KERNEL);
        if (!tr->name)
                goto out_free_tr;

        if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
                goto out_free_tr;

        tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

        cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

        raw_spin_lock_init(&tr->start_lock);

        tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        tr->current_trace = &nop_trace;

        INIT_LIST_HEAD(&tr->systems);
        INIT_LIST_HEAD(&tr->events);

        if (allocate_trace_buffers(tr, trace_buf_size) < 0)
                goto out_free_tr;

        tr->dir = tracefs_create_dir(name, trace_instance_dir);
        if (!tr->dir)
                goto out_free_tr;

        ret = event_trace_add_tracer(tr->dir, tr);
        if (ret) {
                tracefs_remove_recursive(tr->dir);
                goto out_free_tr;
        }

        ftrace_init_trace_array(tr);

        init_tracer_tracefs(tr, tr->dir);
        init_trace_flags_index(tr);
        __update_tracer_options(tr);

        list_add(&tr->list, &ftrace_trace_arrays);

        mutex_unlock(&trace_types_lock);

        return 0;

 out_free_tr:
        free_trace_buffers(tr);
        free_cpumask_var(tr->tracing_cpumask);
        kfree(tr->name);
        kfree(tr);

 out_unlock:
        mutex_unlock(&trace_types_lock);

        return ret;
}
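/*
 * Editor's note: instance_mkdir()/instance_rmdir() are wired up below as
 * the mkdir/rmdir callbacks of the "instances" directory, so a new
 * trace_array with its own buffers and tracefs files can be created from
 * userspace. Illustration only; the path assumes tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *   # mkdir /sys/kernel/tracing/instances/foo     -> instance_mkdir("foo")
 *   # ls    /sys/kernel/tracing/instances/foo     -> trace, trace_pipe, ...
 *   # rmdir /sys/kernel/tracing/instances/foo     -> instance_rmdir("foo")
 *
 * rmdir fails with -EBUSY while the instance still holds references.
 */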
static int instance_rmdir(const char *name)
{
        struct trace_array *tr;
        int found = 0;
        int ret;
        int i;

        mutex_lock(&trace_types_lock);

        ret = -ENODEV;
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr->name && strcmp(tr->name, name) == 0) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                goto out_unlock;

        ret = -EBUSY;
        if (tr->ref || (tr->current_trace && tr->current_trace->ref))
                goto out_unlock;

        list_del(&tr->list);

        /* Disable all the flags that were enabled coming in */
        for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
                if ((1 << i) & ZEROED_TRACE_FLAGS)
                        set_tracer_flag(tr, 1 << i, 0);
        }

        tracing_set_nop(tr);
        clear_ftrace_function_probes(tr);
        event_trace_del_tracer(tr);
        ftrace_clear_pids(tr);
        ftrace_destroy_function_files(tr);
        tracefs_remove_recursive(tr->dir);
        free_trace_buffers(tr);

        for (i = 0; i < tr->nr_topts; i++) {
                kfree(tr->topts[i].topts);
        }
        kfree(tr->topts);

        free_cpumask_var(tr->tracing_cpumask);
        kfree(tr->name);
        kfree(tr);

        ret = 0;

 out_unlock:
        mutex_unlock(&trace_types_lock);

        return ret;
}
static __init void create_trace_instances(struct dentry *d_tracer)
{
        trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
                                                         instance_mkdir,
                                                         instance_rmdir);
        if (WARN_ON(!trace_instance_dir))
                return;
}
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
        int cpu;

        trace_create_file("available_tracers", 0444, d_tracer,
                          tr, &show_traces_fops);

        trace_create_file("current_tracer", 0644, d_tracer,
                          tr, &set_tracer_fops);

        trace_create_file("tracing_cpumask", 0644, d_tracer,
                          tr, &tracing_cpumask_fops);

        trace_create_file("trace_options", 0644, d_tracer,
                          tr, &tracing_iter_fops);

        trace_create_file("trace", 0644, d_tracer,
                          tr, &tracing_fops);

        trace_create_file("trace_pipe", 0444, d_tracer,
                          tr, &tracing_pipe_fops);

        trace_create_file("buffer_size_kb", 0644, d_tracer,
                          tr, &tracing_entries_fops);

        trace_create_file("buffer_total_size_kb", 0444, d_tracer,
                          tr, &tracing_total_entries_fops);

        trace_create_file("free_buffer", 0200, d_tracer,
                          tr, &tracing_free_buffer_fops);

        trace_create_file("trace_marker", 0220, d_tracer,
                          tr, &tracing_mark_fops);

        trace_create_file("trace_marker_raw", 0220, d_tracer,
                          tr, &tracing_mark_raw_fops);

        trace_create_file("trace_clock", 0644, d_tracer, tr,
                          &trace_clock_fops);

        trace_create_file("tracing_on", 0644, d_tracer,
                          tr, &rb_simple_fops);

        create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
        trace_create_file("tracing_max_latency", 0644, d_tracer,
                          &tr->max_latency, &tracing_max_lat_fops);
#endif

        if (ftrace_create_function_files(tr, d_tracer))
                WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
        trace_create_file("snapshot", 0644, d_tracer,
                          tr, &snapshot_fops);
#endif

        for_each_tracing_cpu(cpu)
                tracing_init_tracefs_percpu(tr, cpu);

        ftrace_init_tracefs(tr, d_tracer);
}
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
        struct vfsmount *mnt;
        struct file_system_type *type;

        /*
         * To maintain backward compatibility for tools that mount
         * debugfs to get to the tracing facility, tracefs is automatically
         * mounted to the debugfs/tracing directory.
         */
        type = get_fs_type("tracefs");
        if (!type)
                return NULL;
        mnt = vfs_submount(mntpt, type, "tracefs", NULL);
        put_filesystem(type);
        if (IS_ERR(mnt))
                return NULL;
        mntget(mnt);

        return mnt;
}
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
        struct trace_array *tr = &global_trace;

        /* The top level trace array uses NULL as parent */
        if (tr->dir)
                return NULL;

        if (WARN_ON(!tracefs_initialized()) ||
                (IS_ENABLED(CONFIG_DEBUG_FS) &&
                 WARN_ON(!debugfs_initialized())))
                return ERR_PTR(-ENODEV);

        /*
         * As there may still be users that expect the tracing
         * files to exist in debugfs/tracing, we must automount
         * the tracefs file system there, so older tools still
         * work with the newer kernel.
         */
        tr->dir = debugfs_create_automount("tracing", NULL,
                                           trace_automount, NULL);
        if (!tr->dir) {
                pr_warn_once("Could not create debugfs directory 'tracing'\n");
                return ERR_PTR(-ENOMEM);
        }

        return NULL;
}
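/*
 * Editor's note: an illustration of the compatibility behaviour described
 * above (not from the original source). With tracefs mounted natively the
 * files live under /sys/kernel/tracing; older tools that only know about
 * debugfs keep working because the stub created here automounts tracefs on
 * first access:
 *
 *   # mount -t tracefs nodev /sys/kernel/tracing     (native mount)
 *   # ls /sys/kernel/debug/tracing                   (triggers the automount)
 *
 * Both paths end up showing the same tracefs instance.
 */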
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
        int len;

        len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
        trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
        if (!mod->num_trace_evals)
                return;

        /*
         * Modules with bad taint do not have events created, do
         * not bother with enums either.
         */
        if (trace_module_has_bad_taint(mod))
                return;

        trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
        union trace_eval_map_item *map;
        union trace_eval_map_item **last = &trace_eval_maps;

        if (!mod->num_trace_evals)
                return;

        mutex_lock(&trace_eval_mutex);

        map = trace_eval_maps;

        while (map) {
                if (map->head.mod == mod)
                        break;
                map = trace_eval_jmp_to_tail(map);
                last = &map->tail.next;
                map = map->tail.next;
        }
        if (!map)
                goto out;

        *last = trace_eval_jmp_to_tail(map)->tail.next;
        kfree(map);
 out:
        mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_evals(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_evals(mod);
                break;
        }

        return 0;
}

static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};
#endif /* CONFIG_MODULES */
static __init int tracer_init_tracefs(void)
{
        struct dentry *d_tracer;

        trace_access_lock_init();

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        init_tracer_tracefs(&global_trace, d_tracer);
        ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

        trace_create_file("tracing_thresh", 0644, d_tracer,
                          &global_trace, &tracing_thresh_fops);

        trace_create_file("README", 0444, d_tracer,
                          NULL, &tracing_readme_fops);

        trace_create_file("saved_cmdlines", 0444, d_tracer,
                          NULL, &tracing_saved_cmdlines_fops);

        trace_create_file("saved_cmdlines_size", 0644, d_tracer,
                          NULL, &tracing_saved_cmdlines_size_fops);

        trace_create_file("saved_tgids", 0444, d_tracer,
                          NULL, &tracing_saved_tgids_fops);

        trace_eval_init();

        trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
        register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                          &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

        create_trace_instances(d_tracer);

        update_tracer_options(&global_trace);

        return 0;
}
static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
{
        if (ftrace_dump_on_oops)
                ftrace_dump(ftrace_dump_on_oops);
        return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
        .notifier_call = trace_panic_handler,
        .next = NULL,
        .priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        switch (val) {
        case DIE_OOPS:
                if (ftrace_dump_on_oops)
                        ftrace_dump(ftrace_dump_on_oops);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
        .notifier_call = trace_die_handler,
        .priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT         1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE              KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
        /* Probably should print a warning here. */
        if (s->seq.len >= TRACE_MAX_PRINT)
                s->seq.len = TRACE_MAX_PRINT;

        /*
         * More paranoid code. Although the buffer size is set to
         * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
         * an extra layer of protection.
         */
        if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
                s->seq.len = s->seq.size - 1;

        /* should be zero ended, but we are paranoid. */
        s->buffer[s->seq.len] = 0;

        printk(KERN_TRACE "%s", s->buffer);

        trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
        iter->tr = &global_trace;
        iter->trace = iter->tr->current_trace;
        iter->cpu_file = RING_BUFFER_ALL_CPUS;
        iter->trace_buffer = &global_trace.trace_buffer;

        if (iter->trace && iter->trace->open)
                iter->trace->open(iter);

        /* Annotate start of buffers if we had overruns */
        if (ring_buffer_overruns(iter->trace_buffer->buffer))
                iter->iter_flags |= TRACE_FILE_ANNOTATE;

        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
        if (trace_clocks[iter->tr->clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        static atomic_t dump_running;
        struct trace_array *tr = &global_trace;
        unsigned int old_userobj;
        unsigned long flags;
        int cnt = 0, cpu;

        /* Only allow one dump user at a time. */
        if (atomic_inc_return(&dump_running) != 1) {
                atomic_dec(&dump_running);
                return;
        }

        /*
         * Always turn off tracing when we dump.
         * We don't need to show trace output of what happens
         * between multiple crashes.
         *
         * If the user does a sysrq-z, then they can re-enable
         * tracing with echo 1 > tracing_on.
         */
        tracing_off();

        local_irq_save(flags);
        printk_nmi_direct_enter();

        /* Simulate the iterator */
        trace_init_global_iter(&iter);

        for_each_tracing_cpu(cpu) {
                atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }

        old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

        /* don't look at user memory in panic mode */
        tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

        switch (oops_dump_mode) {
        case DUMP_ALL:
                iter.cpu_file = RING_BUFFER_ALL_CPUS;
                break;
        case DUMP_ORIG:
                iter.cpu_file = raw_smp_processor_id();
                break;
        case DUMP_NONE:
                goto out_enable;
        default:
                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
                iter.cpu_file = RING_BUFFER_ALL_CPUS;
        }

        printk(KERN_TRACE "Dumping ftrace buffer:\n");

        /* Did function tracer already get disabled? */
        if (ftrace_is_dead()) {
                printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
                printk("#          MAY BE MISSING FUNCTION EVENTS\n");
        }

        /*
         * We need to stop all tracing on all CPUS to read
         * the next buffer. This is a bit expensive, but is
         * not done often. We fill all that we can read,
         * and then release the locks again.
         */

        while (!trace_empty(&iter)) {

                if (!cnt)
                        printk(KERN_TRACE "---------------------------------\n");

                cnt++;

                /* reset all but tr, trace, and overruns */
                memset(&iter.seq, 0,
                       sizeof(struct trace_iterator) -
                       offsetof(struct trace_iterator, seq));
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;

                        ret = print_trace_line(&iter);
                        if (ret != TRACE_TYPE_NO_CONSUME)
                                trace_consume(&iter);
                }
                touch_nmi_watchdog();

                trace_printk_seq(&iter.seq);
        }

        if (!cnt)
                printk(KERN_TRACE "   (ftrace buffer empty)\n");
        else
                printk(KERN_TRACE "---------------------------------\n");

 out_enable:
        tr->trace_flags |= old_userobj;

        for_each_tracing_cpu(cpu) {
                atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }
        atomic_dec(&dump_running);
        printk_nmi_direct_exit();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
        int ring_buf_size;
        int ret = -ENOMEM;

        /*
         * Make sure we don't accidentally add more trace options
         * than we have bits for.
         */
        BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;

        if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        /* Only allocate trace_printk buffers if a trace_printk exists */
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                /* Must be called before global_trace.buffer is allocated */
                trace_printk_init_buffers();

        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
        else
                ring_buf_size = 1;

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

        raw_spin_lock_init(&global_trace.start_lock);

        /*
         * The prepare callbacks allocate some memory for the ring buffer. We
         * don't free the buffer if the CPU goes down. If we were to free
         * the buffer, then the user would lose any trace that was in the
         * buffer. The memory will be removed once the "instance" is removed.
         */
        ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
                                      "trace/RB:preapre", trace_rb_cpu_prepare,
                                      NULL);
        if (ret < 0)
                goto out_free_cpumask;
        /* Used for event triggers */
        ret = -ENOMEM;
        temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
        if (!temp_buffer)
                goto out_rm_hp_state;

        if (trace_create_savedcmd() < 0)
                goto out_free_temp_buffer;

        /* TODO: make the number of buffers hot pluggable with CPUS */
        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
                goto out_free_savedcmd;
        }

        if (global_trace.buffer_disabled)
                tracing_off();

        if (trace_boot_clock) {
                ret = tracing_set_clock(&global_trace, trace_boot_clock);
                if (ret < 0)
                        pr_warn("Trace clock %s not defined, going back to default\n",
                                trace_boot_clock);
        }

        /*
         * register_tracer() might reference current_trace, so it
         * needs to be set before we register anything. This is
         * just a bootstrap of current_trace anyway.
         */
        global_trace.current_trace = &nop_trace;

        global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        ftrace_init_global_array_ops(&global_trace);

        init_trace_flags_index(&global_trace);

        register_tracer(&nop_trace);

        /* Function tracing may start here (via kernel command line) */
        init_function_trace();

        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &trace_panic_notifier);

        register_die_notifier(&trace_die_notifier);

        global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

        INIT_LIST_HEAD(&global_trace.systems);
        INIT_LIST_HEAD(&global_trace.events);
        list_add(&global_trace.list, &ftrace_trace_arrays);

        apply_trace_boot_options();

        register_snapshot_cmd();

        return 0;

out_free_savedcmd:
        free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
        ring_buffer_free(temp_buffer);
out_rm_hp_state:
        cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
        free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
out:
        return ret;
}
void __init early_trace_init(void)
{
        if (tracepoint_printk) {
                tracepoint_print_iter =
                        kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
                if (WARN_ON(!tracepoint_print_iter))
                        tracepoint_printk = 0;
                else
                        static_key_enable(&tracepoint_printk_key.key);
        }
        tracer_alloc_buffers();
}
void __init trace_init(void)
{
        trace_event_init();
}
__init static int clear_boot_tracer(void)
{
        /*
         * The default tracer at boot buffer is an init section.
         * This function is called in lateinit. If we did not
         * find the boot tracer, then clear it out, to prevent
         * later registration from accessing the buffer that is
         * about to be freed.
         */
        if (!default_bootup_tracer)
                return 0;

        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
               default_bootup_tracer);

        default_bootup_tracer = NULL;

        return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);