/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/time.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 *
 *    Original TMU/RTC code taken from sh version.
 *    Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
 *      Some code taken from i386 version.
 *      Copyright (C) 1991, 1992, 1995 Linus Torvalds
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/smp.h>

#include <asm/registers.h>	/* required by inline __asm__ stmt. */

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

#include <linux/timex.h>
#include <linux/irq.h>
#include <asm/hardware.h>

#define TMU_TOCR_INIT	0x00
#define TMU0_TCR_INIT	0x0020
#define TMU_TSTR_INIT	1
#define TMU_TSTR_OFF	0

/* RCR1 Bits */
#define RCR1_CF		0x80	/* Carry Flag             */
#define RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RCR1_AIE	0x08	/* Alarm Interrupt Enable */
#define RCR1_AF		0x01	/* Alarm Flag             */

/* RCR2 Bits */
#define RCR2_PEF	0x80	/* PEriodic interrupt Flag */
#define RCR2_PESMASK	0x70	/* Periodic interrupt Set  */
#define RCR2_RTCEN	0x08	/* ENable RTC              */
#define RCR2_ADJ	0x04	/* ADJustment (30-second)  */
#define RCR2_RESET	0x02	/* Reset bit               */
#define RCR2_START	0x01	/* Start bit               */

/* Clock, Power and Reset Controller */
#define CPRC_BLOCK_OFF	0x01010000
#define CPRC_BASE	PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF

#define FRQCR		(cprc_base+0x0)
#define WTCSR		(cprc_base+0x0018)
#define STBCR		(cprc_base+0x0030)

/* Time Management Unit */
#define TMU_BLOCK_OFF	0x01020000
#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)

#define TMU_TOCR	tmu_base+0x0	/* Byte access */
#define TMU_TSTR	tmu_base+0x4	/* Byte access */

#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */

/* Real Time Clock */
#define RTC_BLOCK_OFF	0x01040000
#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF

#define R64CNT		rtc_base+0x00
#define RSECCNT		rtc_base+0x04
#define RMINCNT		rtc_base+0x08
#define RHRCNT		rtc_base+0x0c
#define RWKCNT		rtc_base+0x10
#define RDAYCNT		rtc_base+0x14
#define RMONCNT		rtc_base+0x18
#define RYRCNT		rtc_base+0x1c	/* 16bit */
#define RSECAR		rtc_base+0x20
#define RMINAR		rtc_base+0x24
#define RHRAR		rtc_base+0x28
#define RWKAR		rtc_base+0x2c
#define RDAYAR		rtc_base+0x30
#define RMONAR		rtc_base+0x34
#define RCR1		rtc_base+0x38
#define RCR2		rtc_base+0x3c

#ifndef BCD_TO_BIN
#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
#endif

#ifndef BIN_TO_BCD
#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
#endif
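/*
 * Worked example of the BCD helpers above: the RTC holds each field as
 * packed BCD, so 59 seconds is stored as 0x59.
 *   BCD_TO_BIN(0x59): (0x59 & 15) + (0x59 >> 4) * 10 = 9 + 50 = 59
 *   BIN_TO_BCD(59):   ((59 / 10) << 4) + 59 % 10     = 0x50 + 9 = 0x59
 */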
#define TICK_SIZE (tick_nsec / 1000)
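/* TICK_SIZE is the nominal tick period in microseconds (tick_nsec is in
   nanoseconds); it is only used below to center the RTC update window. */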
extern unsigned long wall_jiffies;

u64 jiffies_64 = INITIAL_JIFFIES;

static unsigned long tmu_base, rtc_base;
unsigned long cprc_base;

/* Variables to allow interpolation of time of day to resolution better than a
 * jiffy. */

/* This is effectively protected by xtime_lock */
static unsigned long ctc_last_interrupt;
static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */

#define CTC_JIFFY_SCALE_SHIFT 40

/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
/* Estimate the number of microseconds that have elapsed since the last timer
   tick, by scaling the delta that has occurred in the CTC register.

   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
   probably needs to use TMU.TCNT0 instead.  That will work even if the CPU is
   sleeping, though will be coarser.

   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
   is running or if the freq or tick arguments of adjtimex are modified after
   we have calibrated the scaling factor?  This will result in either a jump at
   the end of a tick period, or a wrap backwards at the start of the next one,
   if the application is reading the time of day often enough.  We ought to do
   better than this.  For this reason, usecs_per_jiffy is kept separate in the
   calculation below, which allows a future hook into the adjtime-related code
   in kernel/timer.c to remove this hazard.
*/
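/*
 * In short, with ctc_ticks_per_jiffy = cpu_clock / HZ as set up in
 * time_init(), the estimate below is
 *
 *   usecs = (ctc_delta * usecs_per_jiffy
 *            * (2^CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy))
 *                 >> CTC_JIFFY_SCALE_SHIFT
 *         =  ctc_delta * usecs_per_jiffy / ctc_ticks_per_jiffy
 *
 * evaluated with 32x32->64 multiplies and a shift, so no 64-bit divide is
 * needed on the gettimeofday() fast path.
 */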
static unsigned long usecs_since_tick(void)
{
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

	/* Inline assembly to do 32x32->64 multiplies */
	asm volatile ("mulu.l %1, %2, %0" :
	     "=r" (mul1_out) :
	     "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	mul1_out_high = mul1_out >> 32;

	asm volatile ("mulu.l %1, %2, %0" :
	     "=r" (mul2_out_low) :
	     "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

#if 1
	asm volatile ("mulu.l %1, %2, %0" :
	     "=r" (mul2_out_high) :
	     "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif

	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);

	return result;
}
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = usecs_since_tick();
		{
			unsigned long lost = jiffies - wall_jiffies;

			if (lost)
				usec += lost * (1000000 / HZ);
		}

		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * (usecs_since_tick() +
			(jiffies - wall_jiffies) * (1000000 / HZ));

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
static int set_rtc_time(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;

	ctrl_outb(RCR2_RESET, RCR2);	/* Reset pre-scaler & stop RTC */

	cmos_minutes = ctrl_inb(RMINCNT);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
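	/*
	 * For illustration: if the RTC runs in a zone offset by half an
	 * hour, e.g. cmos_minutes == 40 while real_minutes == 10, then
	 * (abs(10 - 40) + 15) / 30 == 1 (odd), so 30 is added back and
	 * the update below proceeds with real_minutes == 40.  Only when
	 * the difference still ends up at 30 minutes or more is the
	 * warning printed and the RTC left untouched.
	 */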
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		ctrl_outb(real_seconds, RSECCNT);
		ctrl_outb(real_minutes, RMINCNT);
	} else {
		printk(KERN_WARNING
		       "set_rtc_time: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);	/* Start RTC */

	return retval;
}
/* last time the RTC clock got updated */
static long last_rtc_update = 0;

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick.
 */
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long long current_ctc;
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	ctc_last_interrupt = (unsigned long) current_ctc;

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
	profile_tick(CPU_PROFILING, regs);

#ifdef CONFIG_HEARTBEAT
	{
		extern void heartbeat(void);

		heartbeat();
	}
#endif

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the RTC clock accordingly every ~11 minutes. set_rtc_time() has to
	 * be called as close as possible to 500 ms before the new second
	 * starts.
	 */
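	/*
	 * Note: 660 s is the ~11 minutes mentioned above.  On a failed RTC
	 * write, last_rtc_update is backdated by 600 s below, so with the
	 * 660 s threshold another attempt is made roughly 60 s later.
	 */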
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (set_rtc_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}
}
/*
 * This is the same as the above, except we _also_ save the current
 * CTC value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);
	/*
	 * Here we are in the timer irq handler.  We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU.  We need to avoid an SMP race with it.  NOTE: we don't need
	 * the irq version of write_seqlock because, as just said, we have
	 * irqs locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	do_timer_interrupt(irq, NULL, regs);
	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
static unsigned long get_rtc_time(void)
{
	unsigned int sec, min, hr, wk, day, mon, yr, yr100;

 again:
	do {
		ctrl_outb(0, RCR1);	/* Clear CF-bit */
		sec = ctrl_inb(RSECCNT);
		min = ctrl_inb(RMINCNT);
		hr  = ctrl_inb(RHRCNT);
		wk  = ctrl_inb(RWKCNT);
		day = ctrl_inb(RDAYCNT);
		mon = ctrl_inb(RMONCNT);
		yr  = ctrl_inw(RYRCNT);
		yr100 = (yr >> 8);
		yr &= 0xff;
	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);

	BCD_TO_BIN(yr100);
	BCD_TO_BIN(yr);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(day);
	BCD_TO_BIN(hr);
	BCD_TO_BIN(min);
	BCD_TO_BIN(sec);

	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
	    hr > 23 || min > 59 || sec > 59) {
		printk(KERN_ERR
		       "SH RTC: invalid value, resetting to 1 Jan 2000\n");
		ctrl_outb(RCR2_RESET, RCR2);	/* Reset & Stop */
		ctrl_outb(0, RSECCNT);
		ctrl_outb(0, RMINCNT);
		ctrl_outb(0, RHRCNT);
		ctrl_outb(6, RWKCNT);
		ctrl_outb(1, RDAYCNT);
		ctrl_outb(1, RMONCNT);
		ctrl_outw(0x2000, RYRCNT);
		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);	/* Start */
		goto again;
	}

	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
}
static __init unsigned int get_cpu_hz(void)
{
	unsigned int count;
	unsigned long __dummy;
	unsigned long ctc_val_init, ctc_val;

	/*
	** Regardless of the toolchain, force the compiler to use the
	** arbitrary register r3 as a clock tick counter.
	** NOTE: r3 must be in accordance with rtc_interrupt()
	*/
	register unsigned long long __rtc_irq_flag __asm__ ("r3");

	local_irq_enable();
	do {} while (ctrl_inb(R64CNT) != 0);
	ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */

	/*
	 * r3 is arbitrary. CDC does not support "=z".
	 */
	ctc_val_init = 0xffffffff;
	ctc_val = ctc_val_init;

	asm volatile("gettr	tr0, %1\n\t"
		     "putcon	%0, " __CTC "\n\t"
		     "and	%2, r63, %2\n\t"
		     "pta	$+4, tr0\n\t"
		     "beq/l	%2, r63, tr0\n\t"
		     "ptabs	%1, tr0\n\t"
		     "getcon	" __CTC ", %0\n\t"
		     : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
		     : "0" (0));
	local_irq_disable();
	/*
	 * SH-3:
	 * CPU clock = 4 stages * loop
	 * tst    rm,rm      if id ex
	 * bt/s   1b            if id ex
	 * add    #1,rd            if id ex
	 *                            (if) pipe line stole
	 * tst    rm,rm                  if id ex
	 * ....
	 *
	 * SH-4:
	 * CPU clock = 6 stages * loop
	 * I don't know why.
	 * ....
	 *
	 * SH-5:
	 * Use the CTC register to count.  This approach returns the right
	 * value even if the I-cache is disabled (e.g. whilst debugging.)
	 */

	count = ctc_val_init - ctc_val; /* CTC counts down */

#if defined (CONFIG_SH_SIMULATOR)
	/*
	 * Let's pretend we are a 5MHz SH-5 to avoid too short a timer
	 * interval, and to keep delay calibration within a reasonable time.
	 */
	return 5000000;
#else
	/*
	 * Scale the cycle count by the ratio between a complete R64CNT
	 * wrap-around (128) and the CUI interrupt being raised (64).
	 */
	return count*2;
#endif
}
static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
	regs->regs[3] = 1;	/* Using r3 */

	return IRQ_HANDLED;
}
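/*
 * Positional initializers for the 2.6-era struct irqaction below:
 * handler, flags, mask, name, dev_id, next.  SA_INTERRUPT makes the
 * handlers run with interrupts disabled.
 */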
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL};
static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL};
void __init time_init(void)
{
	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
	unsigned long interval;
	unsigned long frqcr, ifc, pfc;
	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
#define bfc_table ifc_table	/* Same */
#define pfc_table ifc_table	/* Same */

	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
	if (!tmu_base) {
		panic("Unable to remap TMU\n");
	}

	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
	if (!rtc_base) {
		panic("Unable to remap RTC\n");
	}

	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
	if (!cprc_base) {
		panic("Unable to remap CPRC\n");
	}

	xtime.tv_sec = get_rtc_time();
	xtime.tv_nsec = 0;

	setup_irq(TIMER_IRQ, &irq0);
	setup_irq(RTC_IRQ, &irq1);

	/* Check how fast it is.. */
	cpu_clock = get_cpu_hz();

	/* Note careful order of operations to maintain reasonable precision and avoid overflow. */
	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
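	/*
	 * Sketch of the fixed-point setup above, assuming for illustration
	 * cpu_clock == 400000000 (400 MHz) and HZ == 100:
	 *   cpu_clock / HZ                   = 4000000 CTC ticks per jiffy
	 *   scaled_recip_ctc_ticks_per_jiffy = (1 << 40) / 4000000 ~= 274877
	 * usecs_since_tick() multiplies a CTC delta by usecs_per_jiffy and by
	 * this reciprocal, then shifts right by CTC_JIFFY_SCALE_SHIFT.
	 */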
	disable_irq(RTC_IRQ);

	printk("CPU clock: %d.%02dMHz\n",
	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
	{
		unsigned short bfc;

		frqcr = ctrl_inl(FRQCR);
		ifc = ifc_table[(frqcr >> 6) & 0x0007];
		bfc = bfc_table[(frqcr >> 3) & 0x0007];
		pfc = pfc_table[(frqcr >> 12) & 0x0007];
		master_clock = cpu_clock * ifc;
		bus_clock = master_clock/bfc;
	}

	printk("Bus clock: %d.%02dMHz\n",
	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
	module_clock = master_clock/pfc;
	printk("Module clock: %d.%02dMHz\n",
	       (module_clock/1000000), (module_clock % 1000000)/10000);
	interval = (module_clock/(HZ*4));

	printk("Interval = %ld\n", interval);

	current_cpu_data.cpu_clock    = cpu_clock;
	current_cpu_data.master_clock = master_clock;
	current_cpu_data.bus_clock    = bus_clock;
	current_cpu_data.module_clock = module_clock;

	/* Start TMU0 */
	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);
	ctrl_outl(interval, TMU0_TCNT);
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}
void enter_deep_standby(void)
{
	/* Disable watchdog timer */
	ctrl_outl(0xa5000000, WTCSR);
	/* Configure deep standby on sleep */
	ctrl_outl(0x03, STBCR);

#ifdef CONFIG_SH_ALPHANUMERIC
	{
		extern void mach_alphanum(int position, unsigned char value);
		extern void mach_alphanum_brightness(int setting);
		char halted[] = "Halted. ";
		int i;

		mach_alphanum_brightness(6); /* dimmest setting above off */
		for (i = 0; i < 8; i++) {
			mach_alphanum(i, halted[i]);
		}
		asm __volatile__ ("synco");
	}
#endif

	asm __volatile__ ("sleep");
	asm __volatile__ ("synci");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	panic("Unexpected wakeup!\n");
}
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}