[PATCH] amd64 time.c __iomem annotations
arch/x86_64/kernel/time.c
/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h>	/* for PM timer frequency */
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;			/* TSC clocks / usec, not used here */
static unsigned long hpet_period;	/* fsecs / HPET clock */
unsigned long hpet_tick;		/* HPET clocks / interrupt */
static int hpet_use_timer;		/* Use counter of hpet for time keeping, otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;			/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */
static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	t = get_cycles_sync();
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc; /* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}
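/*
 * Worked example (editor's note; the numbers are illustrative, not from the
 * original source): vxtime.tsc_quot is a 32.32 fixed-point count of
 * microseconds per TSC cycle, set in time_init() as (1000L << 32) / cpu_khz.
 * Assuming a 2 GHz CPU (cpu_khz == 2000000), tsc_quot == 2^32 / 2000, so a
 * delta of 2,000,000 cycles gives (2000000 * 2^32 / 2000) >> 32 == 1000 usec.
 */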
static inline unsigned int do_gettimeoffset_hpet(void)
{
	/* cap counter read to one tick to avoid inconsistencies */
	unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
	return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But it didn't work for me; the clock is non-monotonic
		   with ntp anyway.
		   I dropped all corrections now until a real solution can
		   be found. Note when you fix it here you need to do the same
		   in arch/x86_64/kernel/vsyscall.c and export all needed
		   variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a single
	   word. This checks if the address on the stack looks like a kernel
	   text address. There is a small window for false hits, but in that
	   case the tick is just accounted to the spinlock function.
	   Better would be to write these functions in assembler again
	   and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */
static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

	/*
	 * IRQs are disabled when we're called from the timer interrupt,
	 * no need for spin_lock_irqsave().
	 */
	spin_lock(&rtc_lock);

	/*
	 * Tell the clock it's being set and stop it.
	 */
	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * Since we're only adjusting minutes and seconds, don't interfere with hour
	 * overflow. This avoids messing with unknown time zones but requires your RTC
	 * not to be off by more than 15 minutes. Since we're calling it only when
	 * our clock is externally synchronized using NTP, this shouldn't be a problem.
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

#if 0
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else
#endif
	{
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}

	/*
	 * The following flags have to be released exactly in this order, otherwise the
	 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
	 * not reset the oscillator and will not update precisely 500 ms later. You
	 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
	 * believes data sheets anyway ... -- Markus Kuhn
	 */
	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}
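/*
 * Editor's note (illustrative): the RTC stores values in BCD, one decimal
 * digit per nibble, which is why the conversions above are needed. For
 * example, decimal 25 is written to the chip as 0x25 (nibbles 2 and 5),
 * and BCD_TO_BIN turns 0x25 back into 25.
 */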
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_COUNTER);
		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		this_offset = get_cycles_sync();
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}
EXPORT_SYMBOL(monotonic_clock);
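/*
 * Editor's note, with assumed numbers for illustration: in HPET mode the
 * counter delta is scaled by (NSEC_PER_SEC/HZ)/hpet_tick, i.e. nanoseconds
 * per kernel tick divided by HPET counts per kernel tick. With HZ == 1000
 * and a 14.31818 MHz HPET (hpet_tick ~= 14318) that is 1000000/14318 ~= 69 ns
 * per HPET count; note the integer division truncates. In TSC mode the delta
 * is cycles * 1000 / cpu_khz, e.g. 2,000,000 cycles at cpu_khz == 2000000
 * gives 1,000,000 ns.
 */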
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
		print_symbol("rip %s)\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count + 1) % 25 == 0) {
		cpufreq_delayed_get();
	}
#endif
}
void main_timer_handler(struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and us change them -arca+vojtech
 */
	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address)
		offset = hpet_readl(HPET_COUNTER);

	if (hpet_use_timer) {
		/* if we're using the hpet timer functionality,
		 * we can more accurately know the counter value
		 * when the timer interrupt occurred.
		 */
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	tsc = get_cycles_sync();

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick) {
			lost = (offset - vxtime.last) / hpet_tick - 1;
		}

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

		vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		lost = pmtimer_mark_offset();
#endif
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}

	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}

/*
 * Do the timer stuff.
 */
	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */
#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */
	if (ntp_synced() && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	if (apic_runs_main_timer > 1)
		return IRQ_HANDLED;
	main_timer_handler(regs);
#ifdef CONFIG_X86_LOCAL_APIC
	if (using_apic_timer)
		smp_send_timer_broadcast_ipi();
#endif
	return IRQ_HANDLED;
}
static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
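/*
 * Editor's note, worked example with an assumed clock: cyc2ns_scale holds
 * nanoseconds per cycle in 10-bit fixed point. For cpu_khz == 2000000
 * (2 GHz), cyc2ns_scale = (1000000 << 10) / 2000000 = 512, so
 * cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2, i.e. 0.5 ns per cycle.
 */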
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using TSC always is much faster
	   and HPET may not be mapped yet when the scheduler first runs.
	   Disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	   which means it is not completely exact and may not be monotonic
	   between CPUs. But the errors should be too small to matter for
	   scheduling purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}
static unsigned long get_cmos_time(void)
{
	unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
	unsigned char uip = 0, this = 0;
	unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second, we timeout approximately after 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */

	spin_lock_irqsave(&rtc_lock, flags);

	while (timeout && (!uip || this)) {
		uip |= this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	}

	/*
	 * Here we are safe to assume the registers won't change for a whole
	 * second, so we just go ahead and read them.
	 */
	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

	/*
	 * We know that x86-64 always uses BCD format, no need to check the
	 * config register.
	 */
	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

	/*
	 * x86-64 systems have only existed since 2002, so the two-digit year
	 * can be assumed to be 20xx. This will work up to Dec 31, 2100.
	 */
	year += 2000;

	return mktime(year, mon, day, hour, min, sec);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */
#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}
/* If we notice lost ticks, schedule a call to cpufreq_get(), which tries
 * to verify that the CPU frequency the timing core thinks the CPU is
 * running at is still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG "Losing some ticks... "
			       "checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref);

	return 0;
}
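/*
 * Editor's note, with assumed numbers: cpufreq_scale(old, ref, new) rescales
 * a reference value linearly with frequency, roughly old * new / ref. If the
 * CPU was calibrated at ref_freq == 2000000 kHz and steps down to
 * freq->new == 1000000 kHz, cpu_khz halves to 1000000 and
 * vxtime.tsc_quot = (1000L << 32) / cpu_khz doubles, so each (slower) TSC
 * cycle is again counted with the right microsecond weight.
 */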
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * hpet_calibrate_tsc() calibrates the processor TSC in a very simple way,
 * comparing it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000
static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		tsc_now = get_cycles_sync();
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
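/*
 * Editor's note on the units (illustrative): hpet_period is femtoseconds per
 * HPET count, so (hpet_now - hpet_start) * hpet_period / 1000 is the elapsed
 * time in picoseconds, and cycles * 10^9 / picoseconds yields kHz. Assuming
 * a 14.31818 MHz HPET (hpet_period ~= 69841279 fs) and a 2 GHz TSC, about
 * 100,000,000 TSC cycles pair with ~715,909 HPET counts, and the expression
 * evaluates to roughly 2,000,000 kHz.
 */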
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */
static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	start = get_cycles_sync();
	while ((inb(0x61) & 0x20) == 0);
	end = get_cycles_sync();

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
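/*
 * Editor's note (assumed numbers): the one-shot gate is programmed for
 * PIT_TICK_RATE / (1000/50) counts, i.e. a 50 ms interval, so the TSC delta
 * divided by 50 is cycles per millisecond, which is exactly cpu_khz. A 2 GHz
 * CPU would accumulate ~100,000,000 cycles in those 50 ms and return
 * 2,000,000.
 */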
#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return -1;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with driver.
	 * Timer0 and Timer1 are used by the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);
	return 0;
}
fs_initcall(late_hpet_init);
#endif
static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */
	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
	if (hpet_use_timer) {
		hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
			    HPET_TN_32BIT, HPET_T0_CFG);
		hpet_writel(hpet_tick, HPET_T0_CMP);
		hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
		cfg |= HPET_CFG_LEGACY;
	}

/*
 * Go!
 */
	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}
static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address,
		     PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */
	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;

	hpet_use_timer = (id & HPET_ID_LEGSUP);

	return hpet_timer_stop_set_go(hpet_tick);
}
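/*
 * Editor's note, worked through with assumed values: HPET_PERIOD reports
 * femtoseconds per counter tick, and the sanity check above accepts 100000 fs
 * (10 GHz) through 100000000 fs (10 MHz). One kernel tick lasts
 * (USEC_PER_SEC/HZ) usec, i.e. 10^9 * (USEC_PER_SEC/HZ) femtoseconds, so the
 * rounded quotient is HPET counts per kernel tick. With HZ == 1000 and a
 * 14.31818 MHz HPET (hpet_period ~= 69841279 fs), hpet_tick ~= 14318.
 */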
static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}

#define PIT_MODE 0x43
#define PIT_CH0  0x40

static void __init __pit_init(int val, u8 mode)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(mode, PIT_MODE);
	outb_p(val & 0xff, PIT_CH0);	/* LSB */
	outb_p(val >> 8, PIT_CH0);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

void __init pit_init(void)
{
	__pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
}

void __init pit_stop_interrupt(void)
{
	__pit_init(0, 0x30); /* mode 0 */
}

void __init stop_timer_interrupt(void)
{
	char *name;
	if (vxtime.hpet_address) {
		name = "HPET";
		hpet_timer_stop_set_go(0);
	} else {
		name = "PIT";
		pit_stop_interrupt();
	}
	printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
}

int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
{
	char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif
	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init())
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
	else
		vxtime.hpet_address = 0;

	if (hpet_use_timer) {
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
	} else if (pmtmr_ioport && !vxtime.hpet_address) {
		vxtime_hz = PM_TIMER_FREQUENCY;
		timename = "PM";
		pit_init();
		cpu_khz = pit_calibrate_tsc();
#endif
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.last_tsc = get_cycles_sync();
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz);
#ifndef CONFIG_SMP
	time_init_gtod();
#endif
}
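/*
 * Editor's note on the two quotients set up in time_init() (numbers assumed
 * for illustration): vxtime.quot is 32.32 fixed-point microseconds per timer
 * source tick and vxtime.tsc_quot the same per TSC cycle. With an HPET at
 * vxtime_hz ~= 14318180, quot ~= (10^6 << 32) / 14318180, so a counter delta
 * of 14318 ticks converts back to (14318 * quot) >> 32 ~= 1000 usec, one
 * HZ=1000 kernel tick.
 */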
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
	if (oem_force_hpet_timer())
		return 1;
	/* Intel systems are normally all synchronized. Exceptions
	   are handled in the OEM check above. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;
#endif
	/* Assume multi socket systems are not synchronized */
	return num_present_cpus() > 1;
}
/*
 * Decide after all CPUs are booted what mode gettimeofday should use.
 */
void __init time_init_gtod(void)
{
	char *timetype;

	if (unsynchronized_tsc())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
	/* Using PM for gettimeofday is quite slow, but we have no other
	   choice because the TSC is too unreliable on some systems. */
	} else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
		timetype = "PM";
		do_gettimeoffset = do_gettimeoffset_pm;
		vxtime.mode = VXTIME_PMTMR;
		sysctl_vsyscall = 0;
		printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
	} else {
		timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}
__setup("report_lost_ticks", time_setup);

static long clock_cmos_diff;
static unsigned long sleep_start;

/*
 * sysfs support for the timer.
 */
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}
static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	if (vxtime.mode == VXTIME_HPET) {
		if (hpet_use_timer)
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		else
			vxtime.last = hpet_readl(HPET_COUNTER);
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		pmtimer_resume();
#endif
	} else
		vxtime.last_tsc = get_cycles_sync();
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
	touch_softlockup_watchdog();
	return 0;
}
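/*
 * Editor's note (illustrative): the CMOS clock keeps running across suspend,
 * so (ctime - sleep_start) is the whole-second sleep duration; multiplying
 * by HZ converts it to jiffies. A 90 s suspend with HZ == 1000 advances
 * jiffies and wall_jiffies by 90000 and monotonic_base by 90 * 10^9 ns.
 */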
static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
static unsigned int hpet_t1_cmp;	/* cached comparator register */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}
/*
 * Timer 1 is used for the RTC; we do not use the periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason is that to set up a periodic interrupt in HPET, we need to
 * stop the main counter. If we did that every time someone disables/enables
 * the RTC, it would adversely affect the main kernel timer running on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called the first time; during subsequent
 * interrupts, reinit happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
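/*
 * Editor's note (assumed values): hpet_tick is HPET counts per kernel tick,
 * so hpet_tick * HZ is counts per second and (hpet_tick * HZ) / freq is the
 * comparator increment per emulated RTC interrupt. For a 14.31818 MHz HPET
 * and the default 64 Hz polling rate, that is ~14318180 / 64 ~= 223721
 * counts between Timer 1 interrupts.
 */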
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than current count. */
	cnt = hpet_t1_cmp;
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
}
/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE) {
		UIE_on = 1;
	}
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE) {
		AIE_on = 1;
	}

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}

int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on) {
		rtc_get_rtc_time(&curr_time);
	}
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif
static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 0;
}

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)
{
	notsc = 1;
	return 0;
}

__setup("notsc", notsc_setup);