#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>

#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/delay.h>

unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);
/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int tsc_disabled = -1;
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
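/*
 * Illustrative numbers (HZ is config dependent, 250 assumed here): the
 * jiffies fallback above advances in 1000000000 / 250 = 4000000 ns
 * (4 ms) steps, so sched_clock() is only jiffy granular until
 * tsc_init() clears tsc_disabled and the rdtsc path is taken.
 */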
/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			"cannot disable TSC completely.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
#define MAX_RETRIES	5
#define SMI_THRESHOLD	50000
/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_THRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}
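/*
 * A feel for SMI_THRESHOLD (illustrative, 2 GHz TSC assumed): 50000
 * cycles is 25 us. A normal HPET/PM timer read costs on the order of
 * a microsecond, so a t2 - t1 gap above the threshold almost certainly
 * means an SMI stole time between the two TSC reads.
 */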
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
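/*
 * Worked example with assumed numbers: a 14.31818 MHz HPET reports
 * HPET_PERIOD = 69841279 fs per tick and advances ~715909 ticks during
 * the 50 ms calibration window, so tmp = 715909 * 69841279 / 10^6 is
 * ~5 * 10^7, the elapsed time in ns. The caller passes the TSC delta
 * pre-multiplied by 10^6, so for a 2 GHz TSC (10^8 cycles per 50 ms)
 * the result is (10^8 * 10^6) / (5 * 10^7) = 2000000 kHz.
 */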
/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
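/*
 * Same unit trick with the ACPI PM timer (numbers assumed): it runs at
 * PMTMR_TICKS_PER_SEC = 3579545 Hz, so ~178977 ticks elapse in 50 ms
 * and tmp = 178977 * 10^9 / 3579545 is again ~5 * 10^7 ns.
 */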
#define CAL_MS		50
#define CAL_LATCH	(CLOCK_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	5000
/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(void)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0, (interrupt on terminal
	 * count mode), binary count. Set the latch register to 50ms
	 * (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(CAL_LATCH & 0xff, 0x42);
	outb(CAL_LATCH >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than CAL_PIT_LOOPS
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < CAL_PIT_LOOPS || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, CAL_MS);
	return delta;
}
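/*
 * Worked example with assumed numbers: with CLOCK_TICK_RATE = 1193182
 * Hz, CAL_LATCH = 1193182 / (1000 / 50) = 59659 PIT ticks, i.e. a
 * 50 ms countdown. A 2 GHz TSC accumulates delta ~= 10^8 cycles in
 * that window, and do_div(delta, CAL_MS) returns 2000000: the TSC
 * frequency in kHz.
 */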
/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags;
	int hpet = is_hpet_enabled(), i;
	/*
	 * Run 5 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */
	for (i = 0; i < 5; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc();
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (!hpet && !ref1 && !ref2)
			continue;

		/* Check, whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
	}
	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		printk(KERN_WARNING "TSC: PIT calibration failed due to "
		       "SMI disturbance.\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			printk("TSC: No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
			       "failed due to SMI disturbance.\n");
			return 0;
		}

		/* Use the alternative source */
		printk(KERN_INFO "TSC: using %s reference calibration\n",
		       hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		printk(KERN_INFO "TSC: Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due "
		       "to SMI disturbance. Using PIT calibration\n");
		return tsc_pit_min;
	}

	/* Check the reference deviation */
	delta = ((u64) tsc_pit_min) * 100;
	do_div(delta, tsc_ref_min);

	/*
	 * If both calibration results are inside a 5% window, then we
	 * use the lower frequency of those as it is probably the
	 * closest estimate.
	 */
	if (delta >= 95 && delta <= 105) {
		printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
		       hpet ? "HPET" : "PMTIMER");
		printk(KERN_INFO "TSC: using %s calibration value\n",
		       tsc_pit_min <= tsc_ref_min ? "PIT" :
		       hpet ? "HPET" : "PMTIMER");
		return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
	}

	printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
	       hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed.
	 */
	printk(KERN_INFO "TSC: Using PIT calibration value\n");
	return tsc_pit_min;
}
#ifdef CONFIG_X86_32
/* Only called from the Powernow K7 cpu freq driver */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = calibrate_tsc();
		cpu_khz = tsc_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#endif /* CONFIG_X86_32 */
/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

DEFINE_PER_CPU(unsigned long, cyc2ns);
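/*
 * Worked example with assumed numbers: for cpu_khz = 2000000 (2 GHz)
 * and CYC2NS_SCALE_FACTOR = 10 (SC = 2^10):
 *
 *	cyc2ns_scale = (10^6 * 2^10) / 2000000 = 512
 *	ns = (cycles * 512) >> 10 = cycles / 2
 *
 * i.e. 0.5 ns per cycle of a 2 GHz TSC, computed with one multiply
 * and one shift instead of a 64 bit division.
 */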
static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
			(val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz, freq->cpu);

	return 0;
}
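/*
 * Worked example with assumed numbers: a CPU that registered ref_freq
 * = 2000000 (kHz) with loops_per_jiffy_ref = 10^7 and then steps down
 * to 1000000 kHz gets *lpj = 10^7 * 1000000 / 2000000 = 5 * 10^6, and
 * tsc_khz is halved by the same linear cpufreq_scale() rescaling.
 */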
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */
/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)vget_cycles();

	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
		ret : __vsyscall_gtod_data.clock.cycle_last;
}
#endif
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.vread			= vread_tsc,
#endif
};
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
			d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};
/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}

	return tsc_unstable;
}
static void __init init_tsc_clocksource(void)
{
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
			clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}
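/*
 * Worked example with assumed numbers: for tsc_khz = 2000000 and a
 * shift of 22, clocksource_khz2mult() yields roughly
 * (10^6 << 22) / 2000000 = 2^21, so the timekeeping core computes
 * ns = (cycles * 2^21) >> 22 = cycles / 2, the same 0.5 ns per cycle
 * as the cyc2ns path above.
 */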
void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!cpu_has_tsc)
		return;

	tsc_khz = calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

#ifdef CONFIG_X86_64
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calibrate_cpu();
#endif

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	printk("Detected %lu.%03lu MHz processor.\n",
			(unsigned long)cpu_khz / 1000,
			(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_geode_tsc_reliable();
	init_tsc_clocksource();
}