/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/timer.h>

#include "mach_timer.h"
static int tsc_enabled;
/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

int tsc_disable;
#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			    "cannot disable TSC.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;

	return 1;
}
#endif

__setup("notsc", tsc_setup);
/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}
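/*
 * Worked example of the scaling above (illustrative numbers, not taken
 * from this file): for a 2 GHz CPU, cpu_khz = 2000000, so
 *
 *	cyc2ns_scale = (1000000 << 10) / 2000000 = 512
 *
 * and the consumer side then computes ns = (cycles * 512) >> 10, i.e.
 * one nanosecond per two cycles, which is exactly right for 2 GHz.
 * A minimal sketch of such a consumer follows; the real helper lives in
 * <asm/timer.h>, so the name and exact shape here are assumptions only:
 */
static inline unsigned long long example_cycles_2_ns(unsigned long long cyc)
{
	/* scaled multiply, then shift back out the scale factor */
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}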
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(!tsc_enabled && !tsc_unstable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
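/*
 * Rough numbers for the jiffies fallback above (illustrative, assuming
 * HZ = 250): 1000000000 / HZ = 4000000, so the fallback clock only
 * advances in 4 ms steps, while the TSC path resolves individual cycles.
 * That is why the fallback is considered acceptable for the scheduler
 * clock but would not be for time of day.
 */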
/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif
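/*
 * Note on the construct above: the core kernel provides sched_clock()
 * as a weak default, which this file overrides. With CONFIG_PARAVIRT
 * the override is a real function that goes through the paravirt hook;
 * otherwise it is just an alias, so every call to sched_clock() binds
 * directly to native_sched_clock() with no extra indirection.
 */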
unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits.
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	if (delta64 > (1ULL<<32))
		goto err;

	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;

err:
	local_irq_restore(flags);
	return 0;
}
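/*
 * Arithmetic behind the calibration above, with illustrative numbers:
 * mach_countup() spins on the PIT for roughly CALIBRATE_TIME_MSEC (the
 * actual interval comes from mach_timer.h), so delta64 is the number of
 * TSC cycles elapsed in that time and delta64 / CALIBRATE_TIME_MSEC is
 * cycles per millisecond, i.e. kHz. For example, a 2 GHz CPU counted
 * over a hypothetical 10 ms window gives delta64 = 20000000 and
 * 20000000 / 10 = 2000000 kHz.
 */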
int recalibrate_cpu_khz(void)
{
	unsigned long cpu_khz_old = cpu_khz;

	cpu_khz = calculate_cpu_khz();
	cpu_data[0].loops_per_jiffy =
		cpufreq_scale(cpu_data[0].loops_per_jiffy,
			      cpu_khz_old, cpu_khz);
	return 0;
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
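/*
 * cpufreq_scale() keeps ratios rather than absolute values: with
 * illustrative numbers, if the old calibration was 2400000 kHz and the
 * new measurement is 1200000 kHz, then
 *
 *	cpufreq_scale(lpj, 2400000, 1200000) == lpj / 2
 *
 * so delay loops calibrated against the old frequency stay correct at
 * the new one.
 */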
#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;
static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
					      ref_freq, freq->new);

		if (num_online_cpus() == 1)
			cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
			set_cyc2ns_scale(cpu_khz);
			/* TSC based sched_clock turns to junk with cpufreq: */
			mark_tsc_unstable("cpufreq changes");
		}
	}

	return 0;
}
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};
static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */
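/*
 * Worked example of the notifier logic above, with illustrative
 * frequencies: on a transition from 1000000 kHz to 2000000 kHz the
 * CPUFREQ_PRECHANGE branch fires (old < new), so loops_per_jiffy is
 * doubled before the clock actually speeds up; on the way back down
 * only CPUFREQ_POSTCHANGE matches (old > new), so loops_per_jiffy is
 * halved after the clock has slowed. Either way the delay loops are
 * never scaled for a frequency lower than the one currently running,
 * so udelay() can only err on the long side.
 */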
/* clock source code */

static unsigned long current_tsc_khz = 0;
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);
	return ret;
}
static struct clocksource clocksource_tsc = {
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be set */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
};
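/*
 * How the clocksource conversion works, for reference: the generic code
 * computes ns = (cycles * mult) >> shift, and tsc_init() below fills in
 * mult with clocksource_khz2mult(tsc_khz, shift), which is roughly
 * (1000000 << shift) / tsc_khz. Illustrative numbers, assuming a shift
 * of 22 and a 1 GHz TSC (tsc_khz = 1000000): mult = 2^22, so
 * ns = (cycles << 22) >> 22 = cycles, one nanosecond per cycle.
 */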
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		tsc_enabled = 0;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}
/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}
/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long val;

	rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
	if ((val & RTSC_SUSP))
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif
void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	printk("Detected %lu.%03lu MHz processor.\n",
				(unsigned long)cpu_khz / 1000,
				(unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	} else
		tsc_enabled = 1;

	clocksource_register(&clocksource_tsc);

	return;

out_no_tsc:
	/*
	 * Set the tsc_disable flag if there's no TSC support: this
	 * makes it a fast flag for the kernel to see whether it
	 * should be using the TSC.
	 */
	tsc_disable = 1;
}