kernel/trace/trace_clock.c

// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

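/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * might time a short section with the CPU-local clock. Since
 * trace_clock_local() is not coherent across CPUs, both reads must come
 * from the same CPU; disabling preemption guarantees that.
 */
static inline u64 example_time_section(void (*fn)(void))
{
	u64 t0, t1;

	preempt_disable();		/* keep both reads on one CPU */
	t0 = trace_clock_local();
	fn();
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;			/* elapsed nanoseconds on that CPU */
}
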
/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

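/*
 * Illustrative sketch, not part of the upstream file: trace_clock()
 * timestamps taken on different CPUs can disagree by up to roughly one
 * jiffy, so only gaps larger than that imply real cross-CPU ordering.
 */
static inline bool example_definitely_before(u64 ts_a, u64 ts_b)
{
	return (s64)(ts_b - ts_a) > (s64)TICK_NSEC;	/* TICK_NSEC: ns per jiffy */
}
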
/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);

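/*
 * Illustrative sketch, not part of the upstream file: the value above is
 * in USER_HZ clock_t ticks (usually 100 per second), so converting a
 * stamp to milliseconds is a fixed multiply when USER_HZ divides 1000.
 */
static inline u64 example_jiffy_stamp_to_msec(u64 stamp)
{
	return stamp * (MSEC_PER_SEC / USER_HZ);	/* e.g. 10ms per tick */
}
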
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

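/*
 * Illustrative sketch, not part of the upstream file: because
 * trace_clock_global() never moves backwards, per-CPU event streams can
 * be merged with a plain timestamp comparison, no skew allowance needed.
 */
struct example_event {
	u64 ts;				/* from trace_clock_global() */
};

static inline int example_pick_older(const struct example_event *a,
				     const struct example_event *b)
{
	return (a->ts <= b->ts) ? 0 : 1;	/* index of the older event */
}
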
static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
EXPORT_SYMBOL_GPL(trace_clock_counter);

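/*
 * Illustrative sketch, not part of the upstream file: successive
 * trace_clock_counter() values differ by exactly 1, so a larger gap
 * between two recorded stamps reveals how many events fell in between.
 */
static inline u64 example_events_between(u64 prev_seq, u64 next_seq)
{
	return next_seq - prev_seq - 1;	/* 0 when the stamps are adjacent */
}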