sched clock: clean up sched_clock_cpu()
kernel/sched_clock.c
/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and apply the unstable clock deltas on top. The
 * deltas are filtered to keep the clock monotonic and within an expected
 * window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 1 jiffy of difference).
 */
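/*
 * Overview of the entry points below: the weak sched_clock() fallback,
 * the per-cpu sched_clock_cpu(), the per-tick sched_clock_tick() update,
 * the idle sleep/wakeup hooks, and the irq-safe cpu_clock() wrapper.
 */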
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation; architectures and
 * sub-architectures can override it.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
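/*
 * Illustrative arithmetic (not from the source): with HZ=250,
 * NSEC_PER_SEC / HZ is 4,000,000, so this fallback only advances in
 * 4 ms steps -- jiffies resolution. Architectures with a fine-grained
 * counter (e.g. the TSC on x86) override it for nanosecond resolution.
 */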
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;	/* jiffies at the last tick */
	u64			prev_raw;	/* raw sched_clock() at the last update */
	u64			tick_raw;	/* raw sched_clock() at the last tick */
	u64			tick_gtod;	/* gtod (ktime_get()) at the last tick */
	u64			clock;		/* the filtered, monotonic output clock */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	/* the clock must not fall behind the jiffies/gtod based tick time */
	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;

	if (unlikely(delta < 0)) {
		/*
		 * the raw clock went backwards: drop the delta, but still
		 * creep forward so the clock stays strictly monotonic
		 */
		clock++;
		goto out;
	}

	/* ... nor run more than one tick ahead of that time */
	max_clock = min_clock + TICK_NSEC;

	if (unlikely(clock + delta > max_clock)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

 out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	scd->prev_raw = now;
	scd->tick_jiffies = now_jiffies;
	scd->clock = clock;
}
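/*
 * Worked example of the window (illustrative numbers, not from the
 * source): assume HZ=1000, so TICK_NSEC is 1,000,000. With
 * tick_gtod = 10,000,000 and delta_jiffies = 1, the window is
 * min_clock = 11,000,000 and max_clock = 12,000,000. A raw delta that
 * would push the clock to 15,000,000 is clipped to max_clock; a clock
 * that lags below 11,000,000 is pulled up to min_clock.
 */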
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}
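/*
 * Note on the design: taking both locks in a fixed (address) order means
 * two cpus that concurrently cross-lock each other's data cannot AB-BA
 * deadlock -- both will attempt the lower-addressed lock first.
 */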
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * in order to update a remote cpu's clock based on our
		 * unstable raw time rebase it against:
		 *	tick_raw	(offset between raw counters)
		 *	tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now += scd->tick_raw - my_scd->tick_raw;
		now += my_scd->tick_gtod - scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
	}

	__update_sched_clock(scd, now);
	clock = scd->clock;

	__raw_spin_unlock(&scd->lock);

	return clock;
}
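/*
 * Rebase example (illustrative numbers): if our raw reading is
 * now = 5000, our tick_raw = 4000 and the remote tick_raw = 7000, the
 * first adjustment yields 5000 + (7000 - 4000) = 8000, i.e. our raw
 * delta since the tick expressed in the remote cpu's raw timebase; the
 * second adjustment then compensates for the gtod skew between the two
 * cpus' last ticks.
 */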
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock 2 jiffies.
	 */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}
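/*
 * The tick_raw/tick_gtod snapshots taken above are the per-cpu anchors:
 * sched_clock_cpu() uses them to rebase a remote cpu's raw clock, and
 * __update_sched_clock() uses tick_gtod to build its min/max window.
 */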
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);
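/*
 * Usage sketch (hypothetical, for illustration only; kept under #if 0 so
 * it is not compiled): timing a code section with cpu_clock(). The
 * workload function below is made up.
 */
#if 0
static void time_a_section(void)
{
	unsigned long long t0, t1;
	int cpu = raw_smp_processor_id();

	t0 = cpu_clock(cpu);
	do_some_work();		/* hypothetical workload */
	t1 = cpu_clock(cpu);

	printk(KERN_DEBUG "section took %llu ns\n", t1 - t0);
}
#endif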