/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 */
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/interrupt.h>
19 #include <linux/init.h>
20 #include <linux/mc146818rtc.h>
21 #include <linux/irq.h>
22 #include <linux/time.h>
23 #include <linux/ioport.h>
24 #include <linux/module.h>
25 #include <linux/device.h>
26 #include <linux/bcd.h>
27 #include <asm/pgtable.h>
28 #include <asm/vsyscall.h>
29 #include <asm/timex.h>
30 #include <asm/proto.h>
31 #ifdef CONFIG_X86_LOCAL_APIC
/*
 * Global timekeeping state.
 *
 * NOTE(review): the leading numerals on these lines (and throughout this
 * chunk) are original-file line numbers left behind by a lossy extraction;
 * the code tokens themselves are preserved unchanged.
 */
35 u64 jiffies_64
= INITIAL_JIFFIES
;
37 extern int using_apic_timer
;
/* rtc_lock serializes CMOS RTC register access (see set_rtc_mmss() and
 * get_cmos_time()); i8253_lock serializes i8253 PIT port access (see
 * pit_init() and pit_calibrate_tsc()). */
39 spinlock_t rtc_lock
= SPIN_LOCK_UNLOCKED
;
40 spinlock_t i8253_lock
= SPIN_LOCK_UNLOCKED
;
42 #undef HPET_HACK_ENABLE_DANGEROUS
45 unsigned int cpu_khz
; /* TSC clocks / usec, not used here */
46 unsigned long hpet_period
; /* fsecs / HPET clock */
47 unsigned long hpet_tick
; /* HPET clocks / interrupt */
/* Defaults to the i8253 PIT input frequency (1193182 Hz); time_init()
 * recomputes it from hpet_period when an HPET is present. */
48 unsigned long vxtime_hz
= 1193182;
49 int report_lost_ticks
; /* command line option */
/* Nanoseconds accumulated by the tick handler; base for monotonic_clock(). */
50 unsigned long long monotonic_base
;
/* The __section_* variables below live in dedicated linker sections used by
 * the vsyscall-based gettimeofday path (per the "for vsyscalls" note and
 * the <asm/vsyscall.h> include) — presumably mapped to userspace; confirm
 * against asm/vsyscall.h. */
52 struct vxtime_data __vxtime __section_vxtime
; /* for vsyscalls */
54 volatile unsigned long __jiffies __section_jiffies
= INITIAL_JIFFIES
;
55 unsigned long __wall_jiffies __section_wall_jiffies
= INITIAL_JIFFIES
;
56 struct timespec __xtime __section_xtime
;
57 struct timezone __sys_tz __section_sys_tz
;
/*
 * rdtscll_sync(): reads the time-stamp counter into *tsc (used by
 * time_init() to seed vxtime.last_tsc).
 * NOTE(review): the function body is not visible in this chunk — it was
 * lost in extraction; only the signature remains.
 */
59 static inline void rdtscll_sync(unsigned long *tsc
)
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */
/*
 * do_gettimeoffset_tsc(): microseconds elapsed since the last timer tick,
 * derived from the TSC. Scales the TSC delta since vxtime.last_tsc by the
 * fixed-point factor vxtime.tsc_quot; ">> 32" drops the fractional part.
 * NOTE(review): the declarations of 't'/'x', the TSC read, and the return
 * statement were lost in extraction; only the core computation remains.
 */
77 static inline unsigned int do_gettimeoffset_tsc(void)
82 x
= ((t
- vxtime
.last_tsc
) * vxtime
.tsc_quot
) >> 32;
/*
 * do_gettimeoffset_hpet(): microseconds elapsed since the last timer tick,
 * derived from the HPET main counter — same fixed-point scaling as the TSC
 * variant, using vxtime.last / vxtime.quot.
 */
86 static inline unsigned int do_gettimeoffset_hpet(void)
88 return ((hpet_readl(HPET_COUNTER
) - vxtime
.last
) * vxtime
.quot
) >> 32;
/* Active time-offset backend. Starts as the TSC variant; time_init_smp()
 * switches it to do_gettimeoffset_hpet when an HPET is present. */
91 unsigned int (*do_gettimeoffset
)(void) = do_gettimeoffset_tsc
;
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */
/*
 * do_gettimeofday(): fill *tv with the current wall-clock time.
 *
 * Runs a seqlock read loop against xtime_lock: reads xtime, then adds the
 * jiffies not yet folded into xtime plus the sub-tick offset, retrying if
 * a writer intervened. Finally any microsecond overflow is carried into
 * tv_sec.
 * NOTE(review): the "do {" opener, the read of sec, and the continuation of
 * the 't = ...' expression (which presumably adds do_gettimeoffset() —
 * confirm against the settimeofday inverse below) were lost in extraction.
 */
99 void do_gettimeofday(struct timeval
*tv
)
101 unsigned long seq
, t
;
102 unsigned int sec
, usec
;
/* Seqlock read-side: retry until no concurrent writer touched xtime. */
105 seq
= read_seqbegin(&xtime_lock
);
108 usec
= xtime
.tv_nsec
/ 1000;
/* Ticks not yet accounted in xtime, converted to microseconds. */
110 t
= (jiffies
- wall_jiffies
) * (1000000L / HZ
) +
114 } while (read_seqretry(&xtime_lock
, seq
));
/* Normalize: carry whole seconds out of the microsecond field. */
116 tv
->tv_sec
= sec
+ usec
/ 1000000;
117 tv
->tv_usec
= usec
% 1000000;
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */
/*
 * do_settimeofday(): set the wall-clock time to *tv.
 *
 * Subtracts the interpolation (sub-tick offset plus unaccounted jiffies)
 * that do_gettimeofday() would add back, adjusts wall_to_monotonic by the
 * opposite delta so monotonic time stays continuous, and marks the clock
 * unsynchronized for NTP.
 * NOTE(review): the early-return for the EINVAL case and the final return
 * statement were lost in extraction.
 */
126 int do_settimeofday(struct timespec
*tv
)
128 time_t wtm_sec
, sec
= tv
->tv_sec
;
129 long wtm_nsec
, nsec
= tv
->tv_nsec
;
/* Reject out-of-range nanoseconds (body of this branch lost). */
131 if ((unsigned long)tv
->tv_nsec
>= NSEC_PER_SEC
)
134 write_seqlock_irq(&xtime_lock
);
/* Undo what do_gettimeofday() adds, so a read right after this set
 * returns the value we were given. */
136 nsec
-= do_gettimeoffset() * 1000 +
137 (jiffies
- wall_jiffies
) * (NSEC_PER_SEC
/HZ
);
/* Shift wall_to_monotonic by the inverse of the wall-time change. */
139 wtm_sec
= wall_to_monotonic
.tv_sec
+ (xtime
.tv_sec
- sec
);
140 wtm_nsec
= wall_to_monotonic
.tv_nsec
+ (xtime
.tv_nsec
- nsec
);
142 set_normalized_timespec(&xtime
, sec
, nsec
);
143 set_normalized_timespec(&wall_to_monotonic
, wtm_sec
, wtm_nsec
);
/* Clock was stepped: reset NTP state. */
145 time_adjust
= 0; /* stop active adjtime() */
146 time_status
|= STA_UNSYNC
;
147 time_maxerror
= NTP_PHASE_LIMIT
;
148 time_esterror
= NTP_PHASE_LIMIT
;
150 write_sequnlock_irq(&xtime_lock
);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */
/*
 * set_rtc_mmss(): write the minutes and seconds of 'nowtime' (seconds since
 * the epoch) into the CMOS RTC. Only minutes/seconds are touched to avoid
 * interfering with hour/timezone handling; gives up if the RTC drifted more
 * than ~15 minutes. Caller context has IRQs disabled (see comment below),
 * so a plain spin_lock on rtc_lock suffices.
 * NOTE(review): several structural lines (braces, an "} else {") were lost
 * in extraction; the statements themselves are intact.
 */
163 static void set_rtc_mmss(unsigned long nowtime
)
165 int real_seconds
, real_minutes
, cmos_minutes
;
166 unsigned char control
, freq_select
;
169 * IRQs are disabled when we're called from the timer interrupt,
170 * no need for spin_lock_irqsave()
173 spin_lock(&rtc_lock
);
176 * Tell the clock it's being set and stop it.
/* RTC_SET in the control register halts updates while we write. */
179 control
= CMOS_READ(RTC_CONTROL
);
180 CMOS_WRITE(control
| RTC_SET
, RTC_CONTROL
);
182 freq_select
= CMOS_READ(RTC_FREQ_SELECT
);
183 CMOS_WRITE(freq_select
| RTC_DIV_RESET2
, RTC_FREQ_SELECT
);
/* RTC stores BCD; convert before comparing with our binary minutes. */
185 cmos_minutes
= CMOS_READ(RTC_MINUTES
);
186 BCD_TO_BIN(cmos_minutes
);
189 * since we're only adjusting minutes and seconds, don't interfere with hour
190 * overflow. This avoids messing with unknown time zones but requires your RTC
191 * not to be off by more than 15 minutes. Since we're calling it only when
192 * our clock is externally synchronized using NTP, this shouldn't be a problem.
195 real_seconds
= nowtime
% 60;
196 real_minutes
= nowtime
/ 60;
197 if (((abs(real_minutes
- cmos_minutes
) + 15) / 30) & 1)
198 real_minutes
+= 30; /* correct for half hour time zone */
/* Within tolerance: write back in BCD; otherwise just warn. */
201 if (abs(real_minutes
- cmos_minutes
) < 30) {
202 BIN_TO_BCD(real_seconds
);
203 BIN_TO_BCD(real_minutes
);
204 CMOS_WRITE(real_seconds
, RTC_SECONDS
);
205 CMOS_WRITE(real_minutes
, RTC_MINUTES
);
207 printk(KERN_WARNING
"time.c: can't update CMOS clock "
208 "from %d to %d\n", cmos_minutes
, real_minutes
);
211 * The following flags have to be released exactly in this order, otherwise the
212 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
213 * not reset the oscillator and will not update precisely 500 ms later. You
214 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
215 * believes data sheets anyway ... -- Markus Kuhn
218 CMOS_WRITE(control
, RTC_CONTROL
);
219 CMOS_WRITE(freq_select
, RTC_FREQ_SELECT
);
221 spin_unlock(&rtc_lock
);
/*
 * monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
/*
 * monotonic_clock(): nanoseconds since time_init().
 *
 * Reads monotonic_base plus the delta since the last tick under the
 * xtime_lock seqlock. In HPET mode the delta comes from the timer-0
 * comparator; otherwise from the TSC scaled by cpu_khz.
 * NOTE(review): the declaration of 'seq', the "do {" openers and some
 * closing braces were lost in extraction.
 */
229 unsigned long long monotonic_clock(void)
232 u32 last_offset
, this_offset
, offset
;
233 unsigned long long base
;
235 if (vxtime
.mode
== VXTIME_HPET
) {
/* Seqlock read loop: snapshot last tick offset and the base. */
237 seq
= read_seqbegin(&xtime_lock
);
239 last_offset
= vxtime
.last
;
240 base
= monotonic_base
;
241 this_offset
= hpet_readl(HPET_T0_CMP
) - hpet_tick
;
243 } while (read_seqretry(&xtime_lock
, seq
));
/* Convert HPET ticks since last interrupt to nanoseconds. */
244 offset
= (this_offset
- last_offset
);
245 offset
*=(NSEC_PER_SEC
/HZ
)/hpet_tick
;
246 return base
+ offset
;
/* TSC path: snapshot under the seqlock, read the TSC outside it. */
249 seq
= read_seqbegin(&xtime_lock
);
251 last_offset
= vxtime
.last_tsc
;
252 base
= monotonic_base
;
253 } while (read_seqretry(&xtime_lock
, seq
));
255 rdtscll(this_offset
);
/* TSC cycles -> nanoseconds: cycles * 1000 / (kHz). */
256 offset
= (this_offset
- last_offset
)*1000/cpu_khz
;
257 return base
+ offset
;
262 EXPORT_SYMBOL(monotonic_clock
);
/*
 * timer_interrupt(): the main system tick handler.
 *
 * Under the xtime_lock write side it (1) measures the delay between the
 * hardware tick and handler entry (HPET comparator or PIT counter latch),
 * (2) detects and accounts lost ticks, (3) advances monotonic_base and the
 * vxtime.* interpolation state, (4) runs profiling / local timer work, and
 * (5) opportunistically writes the NTP-synchronized time back to the CMOS
 * RTC roughly every 11 minutes, at the jiffy nearest 500 ms before a
 * second boundary.
 * NOTE(review): the TSC read into 'tsc', the do_timer() call, several
 * braces/#else/#endif lines and the final return were lost in extraction.
 */
265 static irqreturn_t
timer_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
/* rtc_update: next xtime.tv_sec at which the CMOS RTC may be rewritten. */
267 static unsigned long rtc_update
= 0;
268 unsigned long tsc
, lost
= 0;
269 int delay
, offset
= 0;
272 * Here we are in the timer irq handler. We have irqs locally disabled (so we
273 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
274 * on the other CPU, so we need a lock. We also need to lock the vsyscall
275 * variables, because both do_timer() and us change them -arca+vojtech
278 write_seqlock(&xtime_lock
);
/* HPET present: offset = comparator value of the tick that just fired;
 * delay = how far the main counter has run past it. */
280 if (vxtime
.hpet_address
) {
281 offset
= hpet_readl(HPET_T0_CMP
) - hpet_tick
;
282 delay
= hpet_readl(HPET_COUNTER
) - offset
;
/* No HPET: latch and read the PIT countdown (low byte read lost in
 * extraction; high byte OR'd in below). */
284 spin_lock(&i8253_lock
);
287 delay
|= inb(0x40) << 8;
288 spin_unlock(&i8253_lock
);
289 delay
= LATCH
- 1 - delay
;
/* Lost-tick detection, HPET flavor: more than one hpet_tick elapsed
 * since the last recorded comparator value. */
294 if (vxtime
.mode
== VXTIME_HPET
) {
295 if (offset
- vxtime
.last
> hpet_tick
) {
296 lost
= (offset
- vxtime
.last
) / hpet_tick
- 1;
/* Advance monotonic_base by the elapsed HPET interval in ns
 * (left-hand side of this assignment lost in extraction). */
300 (offset
- vxtime
.last
)*(NSEC_PER_SEC
/HZ
) / hpet_tick
;
302 vxtime
.last
= offset
;
/* TSC flavor: microseconds past the expected tick boundary. */
304 offset
= (((tsc
- vxtime
.last_tsc
) *
305 vxtime
.tsc_quot
) >> 32) - (USEC_PER_SEC
/ HZ
);
310 if (offset
> (USEC_PER_SEC
/ HZ
)) {
311 lost
= offset
/ (USEC_PER_SEC
/ HZ
);
312 offset
%= (USEC_PER_SEC
/ HZ
);
/* cycles -> ns for the monotonic base. */
315 monotonic_base
+= (tsc
- vxtime
.last_tsc
)*1000000/cpu_khz
;
/* Re-anchor last_tsc at the hardware tick instant, backing off by the
 * measured delay converted from timer units to TSC units. */
317 vxtime
.last_tsc
= tsc
- vxtime
.quot
* delay
/ vxtime
.tsc_quot
;
/* Clamp so the interpolated offset never goes backwards vs. what
 * gettimeofday may already have returned. */
319 if ((((tsc
- vxtime
.last_tsc
) *
320 vxtime
.tsc_quot
) >> 32) < offset
)
321 vxtime
.last_tsc
= tsc
-
322 (((long) offset
<< 32) / vxtime
.tsc_quot
) - 1;
326 if (report_lost_ticks
)
327 printk(KERN_WARNING
"time.c: Lost %ld timer "
328 "tick(s)! (rip %016lx)\n",
329 (offset
- vxtime
.last
) / hpet_tick
- 1,
335 * Do the timer stuff.
341 * In the SMP case we use the local APIC timer interrupt to do the profiling,
342 * except when we simulate SMP mode on a uniprocessor system, in that case we
343 * have to call the local interrupt handler.
346 #ifndef CONFIG_X86_LOCAL_APIC
347 x86_do_profile(regs
);
349 if (!using_apic_timer
)
350 smp_local_timer_interrupt(regs
);
354 * If we have an externally synchronized Linux clock, then update CMOS clock
355 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
356 * closest to exactly 500 ms before the next second. If the update fails, we
357 * don't care, as it'll be updated on the next turn, and the problem (time way
358 * off) isn't likely to go away much sooner anyway.
361 if ((~time_status
& STA_UNSYNC
) && xtime
.tv_sec
> rtc_update
&&
362 abs(xtime
.tv_nsec
- 500000000) <= tick_nsec
/ 2) {
363 set_rtc_mmss(xtime
.tv_sec
);
/* 660 s = 11 minutes until the next permitted RTC write. */
364 rtc_update
= xtime
.tv_sec
+ 660;
367 write_sequnlock(&xtime_lock
);
/*
 * get_cmos_time(): read the CMOS RTC and return seconds since the epoch.
 *
 * Waits for the Update-In-Progress flag to fall so the registers are
 * stable for a full second, reads all date/time registers under rtc_lock,
 * converts from BCD and fixes up the two-digit year before calling
 * mktime().
 * NOTE(review): the 'flags' declaration, timeout initialization, the
 * first UIP sample, the BCD_TO_BIN conversions and the year < 70 fixup
 * branch were lost in extraction.
 */
372 unsigned long get_cmos_time(void)
374 unsigned int timeout
, year
, mon
, day
, hour
, min
, sec
;
375 unsigned char last
, this;
379 * The Linux interpretation of the CMOS clock register contents: When the
380 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
381 * second which has precisely just started. Waiting for this can take up to 1
382 * second, we timeout approximately after 2.4 seconds on a machine with
383 * standard 8.3 MHz ISA bus.
386 spin_lock_irqsave(&rtc_lock
, flags
);
/* Spin until UIP transitions 1 -> 0 (or the timeout expires). */
391 while (timeout
&& last
&& !this) {
393 this = CMOS_READ(RTC_FREQ_SELECT
) & RTC_UIP
;
398 * Here we are safe to assume the registers won't change for a whole second, so
399 * we just go ahead and read them.
402 sec
= CMOS_READ(RTC_SECONDS
);
403 min
= CMOS_READ(RTC_MINUTES
);
404 hour
= CMOS_READ(RTC_HOURS
);
405 day
= CMOS_READ(RTC_DAY_OF_MONTH
);
406 mon
= CMOS_READ(RTC_MONTH
);
407 year
= CMOS_READ(RTC_YEAR
);
409 spin_unlock_irqrestore(&rtc_lock
, flags
);
412 * We know that x86-64 always uses BCD format, no need to check the config
424 * This will work up to Dec 31, 2069.
427 if ((year
+= 1900) < 1970)
430 return mktime(year
, mon
, day
, hour
, min
, sec
);
/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */
/* Calibration loop bound, in both TSC cycles and HPET ticks. */
438 #define TICK_COUNT 100000000
/*
 * hpet_calibrate_tsc(): measure cpu_khz by running the TSC and the HPET
 * main counter side by side until either advances TICK_COUNT, then scale
 * the TSC delta by the elapsed HPET time (hpet_period is in fs per tick).
 * NOTE(review): 'flags' declaration, the "do {" opener and the rdtscll()
 * reads of tsc_start/tsc_now were lost in extraction.
 */
440 static unsigned int __init
hpet_calibrate_tsc(void)
442 int tsc_start
, hpet_start
;
443 int tsc_now
, hpet_now
;
/* IRQs off so neither counter snapshot pair is torn by an interrupt. */
446 local_irq_save(flags
);
449 hpet_start
= hpet_readl(HPET_COUNTER
);
454 hpet_now
= hpet_readl(HPET_COUNTER
);
457 local_irq_restore(flags
);
458 } while ((tsc_now
- tsc_start
) < TICK_COUNT
&&
459 (hpet_now
- hpet_start
) < TICK_COUNT
);
/* cycles * 1e9 / elapsed-picoseconds => kHz (hpet_period is fs/tick). */
461 return (tsc_now
- tsc_start
) * 1000000000L
462 / ((hpet_now
- hpet_start
) * hpet_period
/ 1000);
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */
/*
 * pit_calibrate_tsc(): measure cpu_khz by timing the TSC over a 50 ms
 * one-shot countdown on PIT channel 2 (gated via port 0x61), then dividing
 * the cycle delta by 50 to get cycles per millisecond (= kHz).
 * NOTE(review): 'flags' declaration, the channel-2 mode-setup outb, and
 * the rdtscll() reads into start/end were lost in extraction.
 */
474 static unsigned int __init
pit_calibrate_tsc(void)
476 unsigned long start
, end
;
479 spin_lock_irqsave(&i8253_lock
, flags
);
/* Gate channel 2 on, speaker off (port 0x61 bits 0 and 1). */
481 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
/* Load channel 2 with a 50 ms count at 1193182 Hz, LSB then MSB. */
484 outb((1193182 / (1000 / 50)) & 0xff, 0x42);
485 outb((1193182 / (1000 / 50)) >> 8, 0x42);
/* Busy-wait for the channel-2 output bit to go high (count done). */
488 while ((inb(0x61) & 0x20) == 0);
492 spin_unlock_irqrestore(&i8253_lock
, flags
);
/* TSC cycles per 50 ms / 50 = cycles per ms = kHz. */
494 return (end
- start
) / 50;
/*
 * hpet_init(): map and sanity-check the HPET, compute hpet_period /
 * hpet_tick, then program timer 0 as a periodic 32-bit tick source and
 * enable legacy-replacement routing.
 * Returns nonzero on failure (return statements lost in extraction).
 */
497 static int hpet_init(void)
499 unsigned int cfg
, id
;
/* No HPET discovered (vxtime.hpet_address unset): bail out. */
501 if (!vxtime
.hpet_address
)
503 set_fixmap_nocache(FIX_HPET_BASE
, vxtime
.hpet_address
);
506 * Read the period, compute tick and quotient.
509 id
= hpet_readl(HPET_ID
);
/* Require a vendor id, at least one extra timer, and legacy-replacement
 * capability. */
511 if (!(id
& HPET_ID_VENDOR
) || !(id
& HPET_ID_NUMBER
) ||
512 !(id
& HPET_ID_LEGSUP
))
/* Period is in femtoseconds; reject implausible values (100ps..100ns). */
515 hpet_period
= hpet_readl(HPET_PERIOD
);
516 if (hpet_period
< 100000 || hpet_period
> 100000000)
/* hpet_tick = HPET clocks per timer interrupt, rounded to nearest
 * (divisor line lost in extraction). */
519 hpet_tick
= (1000000000L * (USEC_PER_SEC
/ HZ
) + hpet_period
/ 2) /
523 * Stop the timers and reset the main counter.
526 cfg
= hpet_readl(HPET_CFG
);
527 cfg
&= ~(HPET_CFG_ENABLE
| HPET_CFG_LEGACY
);
528 hpet_writel(cfg
, HPET_CFG
);
/* Clear both 32-bit halves of the 64-bit main counter. */
529 hpet_writel(0, HPET_COUNTER
);
530 hpet_writel(0, HPET_COUNTER
+ 4);
533 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
534 * and period also hpet_tick.
537 hpet_writel(HPET_T0_ENABLE
| HPET_T0_PERIODIC
| HPET_T0_SETVAL
|
538 HPET_T0_32BIT
, HPET_T0_CFG
);
/* Written twice on purpose: with SETVAL armed, the comparator and the
 * period register are presumably set by successive writes — confirm
 * against the HPET specification. */
539 hpet_writel(hpet_tick
, HPET_T0_CMP
);
540 hpet_writel(hpet_tick
, HPET_T0_CMP
);
/* Go: enable the HPET and route timer 0/1 onto the legacy IRQs. */
546 cfg
|= HPET_CFG_ENABLE
| HPET_CFG_LEGACY
;
547 hpet_writel(cfg
, HPET_CFG
);
/*
 * pit_init(): program i8253 channel 0 as the periodic system tick —
 * binary counting, mode 2 (rate generator), with the LATCH reload value
 * written LSB then MSB.
 */
552 void __init
pit_init(void)
556 spin_lock_irqsave(&i8253_lock
, flags
);
557 outb_p(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
558 outb_p(LATCH
& 0xff, 0x40); /* LSB */
559 outb_p(LATCH
>> 8, 0x40); /* MSB */
560 spin_unlock_irqrestore(&i8253_lock
, flags
);
/*
 * time_setup(): handler for the "report_lost_ticks" kernel command-line
 * option (registered via __setup at the bottom of this file); turns on
 * the lost-tick warnings printed by timer_interrupt().
 * NOTE(review): the return statement was lost in extraction.
 */
563 int __init
time_setup(char *str
)
565 report_lost_ticks
= 1;
/* IRQ 0 action: route the timer interrupt to timer_interrupt(), with
 * interrupts disabled during the handler (SA_INTERRUPT). */
569 static struct irqaction irq0
= {
570 timer_interrupt
, SA_INTERRUPT
, 0, "timer", NULL
, NULL
/* ACPI table parsing entry point, defined elsewhere. */
573 extern void __init
config_acpi_tables(void);
/*
 * time_init(): boot-time clock setup. Seeds xtime from the CMOS RTC,
 * initializes wall_to_monotonic as its negation, picks HPET or PIT as the
 * tick source, calibrates the TSC against it, and fills in the vxtime
 * interpolation state (TSC mode initially; time_init_smp() may switch to
 * HPET mode later).
 * NOTE(review): many lines were lost in extraction — the 'timename'
 * declaration, the hpet_init()/pit_init() calls and their if/else
 * structure, the vxtime_hz divisor, and the setup_irq(0, &irq0) call.
 */
575 void __init
time_init(void)
/* Dangerous debug hack (disabled via #undef above): force-enable an HPET
 * by poking PCI config space directly. */
579 #ifdef HPET_HACK_ENABLE_DANGEROUS
580 if (!vxtime
.hpet_address
) {
581 printk(KERN_WARNING
"time.c: WARNING: Enabling HPET base "
583 outl(0x800038a0, 0xcf8);
584 outl(0xff000001, 0xcfc);
585 outl(0x800038a0, 0xcf8);
586 hpet_address
= inl(0xcfc) & 0xfffffffe;
587 printk(KERN_WARNING
"time.c: WARNING: Enabled HPET "
588 "at %#lx.\n", hpet_address
);
/* Wall clock starts from the battery-backed RTC. */
592 xtime
.tv_sec
= get_cmos_time();
/* monotonic time starts at 0: wall_to_monotonic = -xtime. */
595 set_normalized_timespec(&wall_to_monotonic
,
596 -xtime
.tv_sec
, -xtime
.tv_nsec
);
/* HPET path: frequency = 1e15 fs/s / hpet_period, rounded (divisor line
 * lost in extraction); calibrate the TSC against the HPET. */
599 vxtime_hz
= (1000000000000000L + hpet_period
/ 2) /
601 cpu_khz
= hpet_calibrate_tsc();
/* PIT fallback calibration. */
605 cpu_khz
= pit_calibrate_tsc();
609 printk(KERN_INFO
"time.c: Using %ld.%06ld MHz %s timer.\n",
610 vxtime_hz
/ 1000000, vxtime_hz
% 1000000, timename
);
611 printk(KERN_INFO
"time.c: Detected %d.%03d MHz processor.\n",
612 cpu_khz
/ 1000, cpu_khz
% 1000);
/* Precompute the fixed-point quotients used by the gettimeoffset and
 * tick-interpolation code. */
613 vxtime
.mode
= VXTIME_TSC
;
614 vxtime
.quot
= (1000000L << 32) / vxtime_hz
;
615 vxtime
.tsc_quot
= (1000L << 32) / cpu_khz
;
616 vxtime
.hz
= vxtime_hz
;
617 rdtscll_sync(&vxtime
.last_tsc
);
/*
 * time_init_smp(): second-stage clock selection, run after SMP bringup.
 * If an HPET is present, switches the gettimeoffset backend and vxtime
 * mode to HPET; otherwise stays on PIT/TSC.
 * NOTE(review): the 'timetype' declaration, the HPET-branch timetype
 * assignment and the "} else {" line were lost in extraction.
 */
621 void __init
time_init_smp(void)
625 if (vxtime
.hpet_address
) {
/* Anchor vxtime.last at the tick that most recently fired. */
627 vxtime
.last
= hpet_readl(HPET_T0_CMP
) - hpet_tick
;
628 vxtime
.mode
= VXTIME_HPET
;
629 do_gettimeoffset
= do_gettimeoffset_hpet
;
631 timetype
= "PIT/TSC";
632 vxtime
.mode
= VXTIME_TSC
;
634 printk(KERN_INFO
"time.c: Using %s based timekeeping.\n", timetype
);
/* Register the "report_lost_ticks" command-line option. */
637 __setup("report_lost_ticks", time_setup
);