[PATCH] x86_64: Fix up white space in time.c
arch/x86_64/kernel/time.c
/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h>	/* for PM timer frequency */
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif
#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;				/* TSC clocks / usec, not used here */
static unsigned long hpet_period;		/* fsecs / HPET clock */
unsigned long hpet_tick;			/* HPET clocks / interrupt */
static int hpet_use_timer;			/* Use counter of hpet for time keeping,
						   otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;				/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */

static inline unsigned int do_gettimeoffset_tsc(void)
{
        unsigned long t;
        unsigned long x;
        t = get_cycles_sync();
        if (t < vxtime.last_tsc)
                t = vxtime.last_tsc;	/* hack */
        x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
        return x;
}
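/*
 * Worked example of the fixed-point conversion above (numbers are only
 * illustrative): vxtime.tsc_quot is (1000 << 32) / cpu_khz, i.e. microseconds
 * per TSC cycle in 32.32 fixed point.  With a 2 GHz CPU (cpu_khz = 2000000),
 * tsc_quot is about 2147483; a delta of 2000000 cycles (one millisecond)
 * yields (2000000 * 2147483) >> 32, which is roughly 1000 microseconds.
 */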
static inline unsigned int do_gettimeoffset_hpet(void)
{
        /* cap counter read to one tick to avoid inconsistencies */
        unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
        return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */

void do_gettimeofday(struct timeval *tv)
{
        unsigned long seq, t;
        unsigned int sec, usec;

        do {
                seq = read_seqbegin(&xtime_lock);

                sec = xtime.tv_sec;
                usec = xtime.tv_nsec / 1000;

                /* i386 does some correction here to keep the clock
                   monotonic even when ntpd is fixing drift.
                   But that didn't work for me; the clock is non-monotonic
                   with ntp anyway.
                   I dropped all corrections now until a real solution can
                   be found. Note that when you fix it here you need to do
                   the same in arch/x86_64/kernel/vsyscall.c and export all
                   needed variables in vmlinux.lds. -AK */

                t = (jiffies - wall_jiffies) * (1000000L / HZ) +
                        do_gettimeoffset();
                usec += t;

        } while (read_seqretry(&xtime_lock, seq));

        tv->tv_sec = sec + usec / 1000000;
        tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */

int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);

        nsec -= do_gettimeoffset() * 1000 +
                (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        ntp_clear();

        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
        return 0;
}

EXPORT_SYMBOL(do_settimeofday);
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        /* Assume the lock function has either no stack frame or only a single
           word. This checks if the address on the stack looks like a kernel
           text address. There is a small window for false hits, but in that
           case the tick is just accounted to the spinlock function.
           Better would be to write these functions in assembler again
           and check exactly. */
        if (in_lock_functions(pc)) {
                char *v = *(char **)regs->rsp;
                if ((v >= _stext && v <= _etext) ||
                    (v >= _sinittext && v <= _einittext) ||
                    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
                        return (unsigned long)v;
                return ((unsigned long *)regs->rsp)[1];
        }
        return pc;
}
EXPORT_SYMBOL(profile_pc);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
        int real_seconds, real_minutes, cmos_minutes;
        unsigned char control, freq_select;

        /*
         * IRQs are disabled when we're called from the timer interrupt,
         * no need for spin_lock_irqsave()
         */

        spin_lock(&rtc_lock);

        /*
         * Tell the clock it's being set and stop it.
         */

        control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

        freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

        cmos_minutes = CMOS_READ(RTC_MINUTES);
        BCD_TO_BIN(cmos_minutes);

        /*
         * since we're only adjusting minutes and seconds, don't interfere with hour
         * overflow. This avoids messing with unknown time zones but requires your RTC
         * not to be off by more than 15 minutes. Since we're calling it only when
         * our clock is externally synchronized using NTP, this shouldn't be a problem.
         */

        real_seconds = nowtime % 60;
        real_minutes = nowtime / 60;
        if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
                real_minutes += 30;	/* correct for half hour time zone */
        real_minutes %= 60;

#if 0
        /* AMD 8111 is a really bad time keeper and hits this regularly.
           It probably was an attempt to avoid screwing up DST, but ignore
           that for now. */
        if (abs(real_minutes - cmos_minutes) >= 30) {
                printk(KERN_WARNING "time.c: can't update CMOS clock "
                       "from %d to %d\n", cmos_minutes, real_minutes);
        } else
#endif
        {
                BIN_TO_BCD(real_seconds);
                BIN_TO_BCD(real_minutes);
                CMOS_WRITE(real_seconds, RTC_SECONDS);
                CMOS_WRITE(real_minutes, RTC_MINUTES);
        }

        /*
         * The following flags have to be released exactly in this order, otherwise the
         * DS12887 (popular MC146818A clone with integrated battery and quartz) will
         * not reset the oscillator and will not update precisely 500 ms later. You
         * won't find this mentioned in the Dallas Semiconductor data sheets, but who
         * believes data sheets anyway ... -- Markus Kuhn
         */

        CMOS_WRITE(control, RTC_CONTROL);
        CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

        spin_unlock(&rtc_lock);
}
/*
 * monotonic_clock(): returns # of nanoseconds passed since time_init()
 *	Note: This function is required to return accurate
 *	time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
        unsigned long seq;
        u32 last_offset, this_offset, offset;
        unsigned long long base;

        if (vxtime.mode == VXTIME_HPET) {
                do {
                        seq = read_seqbegin(&xtime_lock);

                        last_offset = vxtime.last;
                        base = monotonic_base;
                        this_offset = hpet_readl(HPET_COUNTER);
                } while (read_seqretry(&xtime_lock, seq));
                offset = (this_offset - last_offset);
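                /*
                 * (NSEC_PER_SEC/HZ)/hpet_tick is the (truncated) number of
                 * nanoseconds per HPET tick; for example, with HZ = 1000 and
                 * a 14.31818 MHz HPET (hpet_tick around 14318) this is
                 * 1000000 / 14318, i.e. about 69 ns per HPET tick.
                 */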
                offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
                return base + offset;
        } else {
                do {
                        seq = read_seqbegin(&xtime_lock);

                        last_offset = vxtime.last_tsc;
                        base = monotonic_base;
                } while (read_seqretry(&xtime_lock, seq));
                this_offset = get_cycles_sync();
                offset = (this_offset - last_offset) * 1000 / cpu_khz;
                return base + offset;
        }
}
EXPORT_SYMBOL(monotonic_clock);
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
        static long lost_count;
        static int warned;

        if (report_lost_ticks) {
                printk(KERN_WARNING "time.c: Lost %d timer "
                       "tick(s)! ", lost);
                print_symbol("rip %s\n", regs->rip);
        }

        if (lost_count == 1000 && !warned) {
                printk(KERN_WARNING
                       "warning: many lost ticks.\n"
                       KERN_WARNING "Your time source seems to be unstable or "
                       "some driver is hogging interrupts\n");
                print_symbol("rip %s\n", regs->rip);
                if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
                        printk(KERN_WARNING "Falling back to HPET\n");
                        vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
                        vxtime.mode = VXTIME_HPET;
                        do_gettimeoffset = do_gettimeoffset_hpet;
                }
                /* else should fall back to PIT, but code missing. */
                warned = 1;
        } else
                lost_count++;

#ifdef CONFIG_CPU_FREQ
        /* In some cases the CPU can change frequency without us noticing
           (like going into thermal throttle).
           Give cpufreq a chance to catch up. */
        if ((lost_count+1) % 25 == 0) {
                cpufreq_delayed_get();
        }
#endif
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        static unsigned long rtc_update = 0;
        unsigned long tsc;
        int delay, offset = 0, lost = 0;

        /*
         * Here we are in the timer irq handler. We have irqs locally disabled (so we
         * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
         * on the other CPU, so we need a lock. We also need to lock the vsyscall
         * variables, because both do_timer() and this code change them -arca+vojtech
         */

        write_seqlock(&xtime_lock);

        if (vxtime.hpet_address)
                offset = hpet_readl(HPET_COUNTER);

        if (hpet_use_timer) {
                /* if we're using the hpet timer functionality,
                 * we can more accurately know the counter value
                 * when the timer interrupt occurred.
                 */
                offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
                delay = hpet_readl(HPET_COUNTER) - offset;
        } else {
                spin_lock(&i8253_lock);
                outb_p(0x00, 0x43);
                delay = inb_p(0x40);
                delay |= inb(0x40) << 8;
                spin_unlock(&i8253_lock);
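                /*
                 * The PIT counts down from LATCH; writing 0x00 to port 0x43
                 * latched the current count of channel 0, which the two reads
                 * above fetched, so LATCH - 1 - delay is (roughly) the number
                 * of PIT ticks that have elapsed since the counter was last
                 * reloaded, i.e. since this timer interrupt was raised.
                 */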
                delay = LATCH - 1 - delay;
        }

        tsc = get_cycles_sync();

        if (vxtime.mode == VXTIME_HPET) {
                if (offset - vxtime.last > hpet_tick) {
                        lost = (offset - vxtime.last) / hpet_tick - 1;
                }

                monotonic_base +=
                        (offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

                vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
        } else if (vxtime.mode == VXTIME_PMTMR) {
                lost = pmtimer_mark_offset();
#endif
        } else {
                offset = (((tsc - vxtime.last_tsc) *
                           vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

                if (offset < 0)
                        offset = 0;

                if (offset > (USEC_PER_SEC / HZ)) {
                        lost = offset / (USEC_PER_SEC / HZ);
                        offset %= (USEC_PER_SEC / HZ);
                }

                monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
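                /*
                 * vxtime.quot is microseconds per timer-source tick and
                 * vxtime.tsc_quot is microseconds per TSC cycle, both in
                 * 32.32 fixed point, so quot * delay / tsc_quot converts
                 * "delay" (timer-source ticks since the tick was due) into
                 * TSC cycles.  Back-dating last_tsc by that amount anchors
                 * the TSC epoch at the moment the tick was due rather than
                 * when this handler ran; the clamp below then makes sure the
                 * new epoch never reports less time than the residual
                 * "offset" already accounted for above.
                 */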
                vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

                if ((((tsc - vxtime.last_tsc) *
                      vxtime.tsc_quot) >> 32) < offset)
                        vxtime.last_tsc = tsc -
                                (((long) offset << 32) / vxtime.tsc_quot) - 1;
        }

        if (lost > 0) {
                handle_lost_ticks(lost, regs);
                jiffies += lost;
        }

        /*
         * Do the timer stuff.
         */

        do_timer(regs);
#ifndef CONFIG_SMP
        update_process_times(user_mode(regs));
#endif

        /*
         * In the SMP case we use the local APIC timer interrupt to do the profiling,
         * except when we simulate SMP mode on a uniprocessor system, in that case we
         * have to call the local interrupt handler.
         */

#ifndef CONFIG_X86_LOCAL_APIC
        profile_tick(CPU_PROFILING, regs);
#else
        if (!using_apic_timer)
                smp_local_timer_interrupt(regs);
#endif

        /*
         * If we have an externally synchronized Linux clock, then update CMOS clock
         * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
         * closest to exactly 500 ms before the next second. If the update fails, we
         * don't care, as it'll be updated on the next turn, and the problem (time way
         * off) isn't likely to go away much sooner anyway.
         */

        if (ntp_synced() && xtime.tv_sec > rtc_update &&
            abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
                set_rtc_mmss(xtime.tv_sec);
                rtc_update = xtime.tv_sec + 660;
        }

        write_sequnlock(&xtime_lock);

#ifdef CONFIG_X86_LOCAL_APIC
        if (using_apic_timer)
                smp_send_timer_broadcast_ipi();
#endif

        return IRQ_HANDLED;
}
static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
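/*
 * Illustrative numbers for the conversion above: cyc2ns_scale approximates
 * 1000000/cpu_khz (nanoseconds per cycle) scaled by 2^CYC2NS_SCALE_FACTOR.
 * For a 2 GHz CPU (cpu_khz = 2000000), cyc2ns_scale = (1000000 << 10) /
 * 2000000 = 512, so cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2, i.e.
 * 0.5 ns per cycle, as expected for 2 GHz.
 */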
unsigned long long sched_clock(void)
{
        unsigned long a = 0;

#if 0
        /* Don't do a HPET read here. Using TSC always is much faster
           and HPET may not be mapped yet when the scheduler first runs.
           Disadvantage is a small drift between CPUs in some configurations,
           but that should be tolerable. */
        if (__vxtime.mode == VXTIME_HPET)
                return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

        /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
           which means it is not completely exact and may not be monotonic between
           CPUs. But the errors should be too small to matter for scheduling
           purposes. */

        rdtscll(a);
        return cycles_2_ns(a);
}
unsigned long get_cmos_time(void)
{
        unsigned int timeout, year, mon, day, hour, min, sec;
        unsigned char last, this;
        unsigned long flags;

        /*
         * The Linux interpretation of the CMOS clock register contents: When the
         * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
         * second which has precisely just started. Waiting for this can take up to 1
         * second, we timeout approximately after 2.4 seconds on a machine with
         * standard 8.3 MHz ISA bus.
         */

        spin_lock_irqsave(&rtc_lock, flags);

        timeout = 1000000;
        last = this = 0;

        while (timeout && last && !this) {
                last = this;
                this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
                timeout--;
        }

        /*
         * Here we are safe to assume the registers won't change for a whole
         * second, so we just go ahead and read them.
         */
        sec = CMOS_READ(RTC_SECONDS);
        min = CMOS_READ(RTC_MINUTES);
        hour = CMOS_READ(RTC_HOURS);
        day = CMOS_READ(RTC_DAY_OF_MONTH);
        mon = CMOS_READ(RTC_MONTH);
        year = CMOS_READ(RTC_YEAR);

        spin_unlock_irqrestore(&rtc_lock, flags);

        /*
         * We know that x86-64 always uses BCD format, no need to check the
         * config register.
         */

        BCD_TO_BIN(sec);
        BCD_TO_BIN(min);
        BCD_TO_BIN(hour);
        BCD_TO_BIN(day);
        BCD_TO_BIN(mon);
        BCD_TO_BIN(year);

        /*
         * x86-64 systems only exist since 2002.
         * This will work up to Dec 31, 2100.
         */
        year += 2000;

        return mktime(year, mon, day, hour, min, sec);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency.  It's
   not that important because current Opteron setups do not support
   frequency scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
        unsigned int cpu;
        for_each_online_cpu(cpu) {
                cpufreq_get(cpu);
        }
        cpufreq_delayed_issched = 0;
}

/* If we notice lost ticks, schedule a call to cpufreq_get(); it verifies
 * that the CPU frequency the timing core thinks the CPU is running at is
 * still correct.
 */
static void cpufreq_delayed_get(void)
{
        static int warned;
        if (cpufreq_init && !cpufreq_delayed_issched) {
                cpufreq_delayed_issched = 1;
                if (!warned) {
                        warned = 1;
                        printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
                }
                schedule_work(&cpufreq_delayed_get_work);
        }
}

static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned long *lpj, dummy;

        if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
                return 0;

        lpj = &dummy;
        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
                lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
                lpj = &boot_cpu_data.loops_per_jiffy;
#endif

        if (!ref_freq) {
                ref_freq = freq->old;
                loops_per_jiffy_ref = *lpj;
                cpu_khz_ref = cpu_khz;
        }
        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

                cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
        }

        set_cyc2ns_scale(cpu_khz_ref);

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
        if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                       CPUFREQ_TRANSITION_NOTIFIER))
                cpufreq_init = 1;
        return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
{
        int tsc_start, hpet_start;
        int tsc_now, hpet_now;
        unsigned long flags;

        local_irq_save(flags);
        local_irq_disable();

        hpet_start = hpet_readl(HPET_COUNTER);
        rdtscl(tsc_start);

        do {
                local_irq_disable();
                hpet_now = hpet_readl(HPET_COUNTER);
                tsc_now = get_cycles_sync();
                local_irq_restore(flags);
        } while ((tsc_now - tsc_start) < TICK_COUNT &&
                 (hpet_now - hpet_start) < TICK_COUNT);
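        /*
         * hpet_period is in femtoseconds per HPET tick, so the denominator
         * (hpet ticks * hpet_period / 1000) is the elapsed time in
         * picoseconds, and cycles * 10^9 / picoseconds is cycles per
         * millisecond, i.e. the TSC frequency in kHz.
         */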
        return (tsc_now - tsc_start) * 1000000000L
                / ((hpet_now - hpet_start) * hpet_period / 1000);
}

/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */

static unsigned int __init pit_calibrate_tsc(void)
{
        unsigned long start, end;
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);
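        /*
         * Port 0x61: bit 0 gates PIT channel 2, bit 1 drives the speaker.
         * Enable the gate and keep the speaker quiet, then program channel 2
         * as a binary one-shot (mode 0, LSB/MSB) counting down
         * PIT_TICK_RATE / 20 ticks, i.e. 50 ms.  Bit 5 of port 0x61 reflects
         * the channel 2 output and goes high when the count expires.
         */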
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        outb(0xb0, 0x43);
        outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
        outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
        start = get_cycles_sync();
        while ((inb(0x61) & 0x20) == 0)
                ;
        end = get_cycles_sync();

        spin_unlock_irqrestore(&i8253_lock, flags);

        return (end - start) / 50;
}
#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
        struct hpet_data hd;
        unsigned int ntimer;

        if (!vxtime.hpet_address)
                return -1;

        memset(&hd, 0, sizeof(hd));

        ntimer = hpet_readl(HPET_ID);
        ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
        ntimer++;

        /*
         * Register with driver.
         * Timer0 and Timer1 are used by the platform.
         */
        hd.hd_phys_address = vxtime.hpet_address;
        hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
        hd.hd_nirqs = ntimer;
        hd.hd_flags = HPET_DATA_PLATFORM;
        hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
        hpet_reserve_timer(&hd, 1);
#endif
        hd.hd_irq[0] = HPET_LEGACY_8254;
        hd.hd_irq[1] = HPET_LEGACY_RTC;
        if (ntimer > 2) {
                struct hpet *hpet;
                struct hpet_timer *timer;
                int i;

                hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

                for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
                     timer++, i++)
                        hd.hd_irq[i] = (timer->hpet_config &
                                        Tn_INT_ROUTE_CNF_MASK) >>
                                                Tn_INT_ROUTE_CNF_SHIFT;
        }

        hpet_alloc(&hd);
        return 0;
}
fs_initcall(late_hpet_init);
#endif
static int hpet_timer_stop_set_go(unsigned long tick)
{
        unsigned int cfg;

        /*
         * Stop the timers and reset the main counter.
         */

        cfg = hpet_readl(HPET_CFG);
        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
        hpet_writel(cfg, HPET_CFG);
        hpet_writel(0, HPET_COUNTER);
        hpet_writel(0, HPET_COUNTER + 4);

        /*
         * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
         * and period also hpet_tick.
         */
        if (hpet_use_timer) {
                hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
                            HPET_TN_32BIT, HPET_T0_CFG);
                hpet_writel(hpet_tick, HPET_T0_CMP);
                hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
                cfg |= HPET_CFG_LEGACY;
        }

        /*
         * Go!
         */

        cfg |= HPET_CFG_ENABLE;
        hpet_writel(cfg, HPET_CFG);

        return 0;
}
static int hpet_init(void)
{
        unsigned int id;

        if (!vxtime.hpet_address)
                return -1;
        set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
        __set_fixmap(VSYSCALL_HPET, vxtime.hpet_address,
                     PAGE_KERNEL_VSYSCALL_NOCACHE);

        /*
         * Read the period, compute tick and quotient.
         */

        id = hpet_readl(HPET_ID);

        if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
                return -1;

        hpet_period = hpet_readl(HPET_PERIOD);
        if (hpet_period < 100000 || hpet_period > 100000000)
                return -1;
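        /*
         * hpet_period is the counter period in femtoseconds (the sanity
         * check above accepts 10 MHz to 10 GHz).  hpet_tick below is the
         * number of HPET ticks per timer interrupt, rounded to nearest;
         * for example, a 14.31818 MHz HPET has a period of about
         * 69841279 fs, which with HZ = 1000 gives hpet_tick of roughly 14318.
         */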
        hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
                hpet_period;

        hpet_use_timer = (id & HPET_ID_LEGSUP);

        return hpet_timer_stop_set_go(hpet_tick);
}

static int hpet_reenable(void)
{
        return hpet_timer_stop_set_go(hpet_tick);
}
void __init pit_init(void)
{
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);
        outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(LATCH & 0xff, 0x40);	/* LSB */
        outb_p(LATCH >> 8, 0x40);	/* MSB */
        spin_unlock_irqrestore(&i8253_lock, flags);
}

int __init time_setup(char *str)
{
        report_lost_ticks = 1;
        return 1;
}

static struct irqaction irq0 = {
        timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
{
        char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
        if (!vxtime.hpet_address) {
                printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
                       "manually!\n");
                outl(0x800038a0, 0xcf8);
                outl(0xff000001, 0xcfc);
                outl(0x800038a0, 0xcf8);
                vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
                printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
                       "at %#lx.\n", vxtime.hpet_address);
        }
#endif
        if (nohpet)
                vxtime.hpet_address = 0;

        xtime.tv_sec = get_cmos_time();
        xtime.tv_nsec = 0;

        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        if (!hpet_init())
                vxtime_hz = (1000000000000000L + hpet_period / 2) /
                        hpet_period;
        else
                vxtime.hpet_address = 0;

        if (hpet_use_timer) {
                cpu_khz = hpet_calibrate_tsc();
                timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
        } else if (pmtmr_ioport && !vxtime.hpet_address) {
                vxtime_hz = PM_TIMER_FREQUENCY;
                timename = "PM";
                pit_init();
                cpu_khz = pit_calibrate_tsc();
#endif
        } else {
                pit_init();
                cpu_khz = pit_calibrate_tsc();
                timename = "PIT";
        }

        printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
               vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
        printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
               cpu_khz / 1000, cpu_khz % 1000);
        vxtime.mode = VXTIME_TSC;
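        /*
         * Both quotients are 32.32 fixed-point microsecond factors:
         * vxtime.quot is microseconds per tick of the chosen time source
         * (vxtime_hz) and vxtime.tsc_quot is microseconds per TSC cycle.
         * do_gettimeoffset_*() multiply a counter delta by one of these and
         * shift right by 32 to obtain microseconds.
         */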
        vxtime.quot = (1000000L << 32) / vxtime_hz;
        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
        vxtime.last_tsc = get_cycles_sync();
        setup_irq(0, &irq0);

        set_cyc2ns_scale(cpu_khz);
#ifndef CONFIG_SMP
        time_init_gtod();
#endif
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
        if (oem_force_hpet_timer())
                return 1;
        /* Intel systems are normally all synchronized. Exceptions
           are handled in the OEM check above. */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                return 0;
#endif
        /* Assume multi socket systems are not synchronized */
        return num_present_cpus() > 1;
}
/*
 * Decide after all CPUs are booted what mode gettimeofday should use.
 */
void __init time_init_gtod(void)
{
        char *timetype;

        if (unsynchronized_tsc())
                notsc = 1;
        if (vxtime.hpet_address && notsc) {
                timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
                vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
                vxtime.mode = VXTIME_HPET;
                do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
        /* Using PM for gettimeofday is quite slow, but we have no other
           choice because the TSC is too unreliable on some systems. */
        } else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
                timetype = "PM";
                do_gettimeoffset = do_gettimeoffset_pm;
                vxtime.mode = VXTIME_PMTMR;
                sysctl_vsyscall = 0;
                printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
        } else {
                timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
                vxtime.mode = VXTIME_TSC;
        }

        printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}

__setup("report_lost_ticks", time_setup);
static long clock_cmos_diff;
static unsigned long sleep_start;

/*
 * sysfs support for the timer.
 */

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
        /*
         * Estimate time zone so that set_time can update the clock
         */
        long cmos_time = get_cmos_time();

        clock_cmos_diff = -cmos_time;
        clock_cmos_diff += get_seconds();
        sleep_start = cmos_time;
        return 0;
}

static int timer_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long sec;
        unsigned long ctime = get_cmos_time();
        unsigned long sleep_length = (ctime - sleep_start) * HZ;

        if (vxtime.hpet_address)
                hpet_reenable();
        else
                i8254_timer_resume();

        sec = ctime + clock_cmos_diff;
        write_seqlock_irqsave(&xtime_lock, flags);
        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        jiffies += sleep_length;
        wall_jiffies += sleep_length;
        touch_softlockup_watchdog();
        return 0;
}

static struct sysdev_class timer_sysclass = {
        .resume = timer_resume,
        .suspend = timer_suspend,
        set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
        .id = 0,
        .cls = &timer_sysclass,
};

static int time_init_device(void)
{
        int error = sysdev_class_register(&timer_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
static unsigned int hpet_t1_cmp;	/* cached comparator register */
int is_hpet_enabled(void)
{
        return vxtime.hpet_address != 0;
}

/*
 * Timer 1 is used for the RTC; we do not use the periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason being, to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we do that every time someone disables/enables
 * the RTC, we will have an adverse effect on the main kernel timer running on
 * Timer 0. So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first interrupt; on subsequent
 * interrupts, reinitialization happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
        unsigned int cfg, cnt;
        unsigned long flags;

        if (!is_hpet_enabled())
                return 0;
        /*
         * Set up timer 1 and enable its interrupt.
         */
        if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
                hpet_rtc_int_freq = PIE_freq;
        else
                hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

        local_irq_save(flags);
        cnt = hpet_readl(HPET_COUNTER);
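        /*
         * hpet_tick is HPET ticks per kernel timer tick, so hpet_tick * HZ
         * is HPET ticks per second; dividing by the emulated RTC interrupt
         * frequency gives the comparator increment.  For example, a
         * 14.31818 MHz HPET at the default 64 Hz rate advances the
         * comparator by about 223700 ticks, i.e. one interrupt every ~15.6 ms.
         */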
        cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
        hpet_writel(cnt, HPET_T1_CMP);
        hpet_t1_cmp = cnt;
        local_irq_restore(flags);

        cfg = hpet_readl(HPET_T1_CFG);
        cfg &= ~HPET_TN_PERIODIC;
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        hpet_writel(cfg, HPET_T1_CFG);

        return 1;
}

static void hpet_rtc_timer_reinit(void)
{
        unsigned int cfg, cnt;

        if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
                cfg = hpet_readl(HPET_T1_CFG);
                cfg &= ~HPET_TN_ENABLE;
                hpet_writel(cfg, HPET_T1_CFG);
                return;
        }

        if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
                hpet_rtc_int_freq = PIE_freq;
        else
                hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

        /* It is more accurate to use the comparator value than the current count. */
        cnt = hpet_t1_cmp;
        cnt += hpet_tick * HZ / hpet_rtc_int_freq;
        hpet_writel(cnt, HPET_T1_CMP);
        hpet_t1_cmp = cnt;
}
/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
        if (!is_hpet_enabled())
                return 0;

        if (bit_mask & RTC_UIE)
                UIE_on = 0;
        if (bit_mask & RTC_PIE)
                PIE_on = 0;
        if (bit_mask & RTC_AIE)
                AIE_on = 0;

        return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
        int timer_init_reqd = 0;

        if (!is_hpet_enabled())
                return 0;

        if (!(PIE_on | AIE_on | UIE_on))
                timer_init_reqd = 1;

        if (bit_mask & RTC_UIE) {
                UIE_on = 1;
        }
        if (bit_mask & RTC_PIE) {
                PIE_on = 1;
                PIE_count = 0;
        }
        if (bit_mask & RTC_AIE) {
                AIE_on = 1;
        }

        if (timer_init_reqd)
                hpet_rtc_timer_init();

        return 1;
}

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
        if (!is_hpet_enabled())
                return 0;

        alarm_time.tm_hour = hrs;
        alarm_time.tm_min = min;
        alarm_time.tm_sec = sec;

        return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
        if (!is_hpet_enabled())
                return 0;

        PIE_freq = freq;
        PIE_count = 0;

        return 1;
}

int hpet_rtc_dropped_irq(void)
{
        if (!is_hpet_enabled())
                return 0;

        return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct rtc_time curr_time;
        unsigned long rtc_int_flag = 0;
        int call_rtc_interrupt = 0;

        hpet_rtc_timer_reinit();

        if (UIE_on | AIE_on) {
                rtc_get_rtc_time(&curr_time);
        }
        if (UIE_on) {
                if (curr_time.tm_sec != prev_update_sec) {
                        /* Set update int info, call real rtc int routine */
                        call_rtc_interrupt = 1;
                        rtc_int_flag = RTC_UF;
                        prev_update_sec = curr_time.tm_sec;
                }
        }
        if (PIE_on) {
                PIE_count++;
                if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
                        /* Set periodic int info, call real rtc int routine */
                        call_rtc_interrupt = 1;
                        rtc_int_flag |= RTC_PF;
                        PIE_count = 0;
                }
        }
        if (AIE_on) {
                if ((curr_time.tm_sec == alarm_time.tm_sec) &&
                    (curr_time.tm_min == alarm_time.tm_min) &&
                    (curr_time.tm_hour == alarm_time.tm_hour)) {
                        /* Set alarm int info, call real rtc int routine */
                        call_rtc_interrupt = 1;
                        rtc_int_flag |= RTC_AF;
                }
        }
        if (call_rtc_interrupt) {
                rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
                rtc_interrupt(rtc_int_flag, dev_id, regs);
        }
        return IRQ_HANDLED;
}
#endif
static int __init nohpet_setup(char *s)
{
        nohpet = 1;
        return 0;
}

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)
{
        notsc = 1;
        return 0;
}

__setup("notsc", notsc_setup);