/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock).
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/perf_counter.h>

#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.mult         = 0,	/* To be filled in */
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.mult         = 0,	/* To be filled in */
	.read         = timebase_read,
};

#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.shift          = 0,	/* To be filled in */
	.mult           = 0,	/* To be filled in */
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};

struct decrementer_clock {
	struct clock_event_device event;
	u64 next_tb;
};

static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
static void __init clocksource_init(void);
#endif
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
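
/*
 * Illustrative sketch, not part of this file: since XSEC_PER_SEC is
 * 2^20, dividing by it is a 20-bit shift, and the 32-bit variant of
 * SCALE_XSEC gets the same result without a 64-bit division because
 * ((xsec << 12) * max) >> 32 == (xsec * max) >> 20.  The demo below
 * assumes a hosted environment with 64-bit arithmetic; demo_mulhwu()
 * stands in for the mulhwu() helper.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>
#include <assert.h>

static uint32_t demo_mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);	/* high word of a*b */
}

static void demo_scale_xsec(void)
{
	uint32_t xsec = 12345, max = 0x00100000;

	assert(demo_mulhwu(xsec << 12, max) ==
	       (uint32_t)(((uint64_t)xsec * max) >> 20));
}
#endif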
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;
#define TICKLEN_SCALE	NTP_SCALE_SHIFT
static u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
static u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
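
/*
 * Illustrative sketch, not part of this file: each factor above is the
 * 0.64 fixed-point fraction rate / tb_ticks_per_sec, so a conversion is
 * a single high-word multiply, result = (ticks * factor) >> 64.  Here
 * the factor is computed with __int128 instead of div128_by_32(); the
 * demo_* names are hypothetical.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>

/* floor(rate * 2^64 / tb_per_sec): rate/tb_per_sec as a 0.64 fraction */
static uint64_t demo_cputime_factor(uint64_t rate, uint64_t tb_per_sec)
{
	return (uint64_t)(((unsigned __int128)rate << 64) / tb_per_sec);
}

/* e.g. milliseconds: rate = 1000, as in calc_cputime_factors() above */
static uint64_t demo_ticks_to_msec(uint64_t ticks, uint64_t tb_per_sec)
{
	uint64_t factor = demo_cputime_factor(1000, tb_per_sec);

	return (uint64_t)(((unsigned __int128)ticks * factor) >> 64);
}
#endif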
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the PURR.
 */
static u64 read_spurr(u64 purr)
{
	/*
	 * cpus without PURR won't have a SPURR:
	 * we already know the former when we use this, so tell gcc
	 */
	if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	return purr;
}
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, nowscaled, delta, deltascaled, sys_time;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	nowscaled = read_spurr(now);
	delta = now - get_paca()->startpurr;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startpurr = now;
	get_paca()->startspurr = nowscaled;
	if (!in_interrupt()) {
		/* deltascaled includes both user and system time.
		 * Hence scale it based on the purr ratio to estimate
		 * the system time */
		sys_time = get_paca()->system_time;
		if (get_paca()->user_time)
			deltascaled = deltascaled * sys_time /
				(sys_time + get_paca()->user_time);
		delta += sys_time;
		get_paca()->system_time = 0;
	}
	if (in_irq() || idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta, deltascaled);
	else
		account_idle_time(delta);
	per_cpu(cputime_last_delta, smp_processor_id()) = delta;
	per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
	local_irq_restore(flags);
}
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	utimescaled = cputime_to_scaled(utime);
	account_user_time(tsk, utime, utimescaled);
}
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
	u64	spurr;			/* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = get_tb_or_rtc();
	p->purr = mfspr(SPRN_PURR);
	wmb();
	p->initialized = 1;
	local_irq_restore(flags);
}
/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 1);
}
/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	pme = &__get_cpu_var(cpu_purr_data);
	if (!pme->initialized)
		return;		/* !CPU_FTR_PURR or early in boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0) {
		if (idle_task(smp_processor_id()) != current)
			account_steal_time(stolen);
		else
			account_idle_time(stolen);
	}
	pme->tb = tb;
	pme->purr = purr;
}
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &__get_cpu_var(cpu_purr_data);
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define calculate_steal_time()	do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()		do { } while (0)
#endif
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
	snapshot_purr();
}
void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	vdso_data->stamp_xtime = xtime;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
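
/*
 * Illustrative sketch, not part of the real vdso sources: the userspace
 * side of the tb_update_count protocol described above is a lock-free
 * retry loop, analogous to a seqlock read side.  Field and function
 * names are hypothetical, and the volatile qualifier merely stands in
 * for the read barriers the real code would need.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>

struct demo_vdso_data {
	uint32_t tb_update_count;
	uint64_t tb_orig_stamp, stamp_xsec, tb_to_xs;
};

static void demo_read_gtod(volatile struct demo_vdso_data *vd,
			   uint64_t *stamp_xsec, uint64_t *tb_to_xs)
{
	uint32_t seq;

	do {
		seq = vd->tb_update_count;	/* odd means update in flight */
		*stamp_xsec = vd->stamp_xsec;
		*tb_to_xs = vd->tb_to_xs;
	} while ((seq & 1) || seq != vd->tb_update_count);
}
#endif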
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static int __init iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	/* Make sure we only run on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				tb_to_xs = divres.result_low;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;

	/* Called here as now we know accurate values for the timebase */
	clocksource_init();
	return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
DEFINE_PER_CPU(u8, perf_counter_pending);

void set_perf_counter_pending(void)
{
	get_cpu_var(perf_counter_pending) = 1;
	set_dec(1);
	put_cpu_var(perf_counter_pending);
}

#define test_perf_counter_pending()	__get_cpu_var(perf_counter_pending)
#define clear_perf_counter_pending()	__get_cpu_var(perf_counter_pending) = 0

#else  /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */

#define test_perf_counter_pending()	0
#define clear_perf_counter_pending()

#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
	struct clock_event_device *evt = &decrementer->event;
	u64 now;

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions */
	set_dec(DECREMENTER_MAX);

#ifdef CONFIG_PPC32
	if (test_perf_counter_pending()) {
		clear_perf_counter_pending();
		perf_counter_do_pending();
	}
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	now = get_tb_or_rtc();
	if (now < decrementer->next_tb) {
		/* not time for this event yet */
		now = decrementer->next_tb - now;
		if (now <= DECREMENTER_MAX)
			set_dec((int)now);
		return;
	}
	old_regs = set_irq_regs(regs);
	irq_enter();

	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	if (evt->event_handler)
		evt->event_handler(evt);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
		process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);
}
void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}
#ifdef CONFIG_SUSPEND
void generic_suspend_disable_irqs(void)
{
	preempt_disable();

	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(0x7fffffff);
	local_irq_disable();
	set_dec(0x7fffffff);
}

void generic_suspend_enable_irqs(void)
{
	wakeup_decrementer();

	local_irq_enable();
	preempt_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif /* CONFIG_SUSPEND */
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;

	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		per_cpu(last_jiffy, i) = previous_tb;
	}
}
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
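
/*
 * Illustrative sketch, not part of this file: mulhdu() can be modelled
 * with __int128, making the conversion in sched_clock() above
 * ns = ((ticks * tb_to_ns_scale) >> 64) << tb_to_ns_shift.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>

static uint64_t demo_mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);	/* high 64 bits */
}

static uint64_t demo_tb_to_ns(uint64_t ticks, uint64_t scale, unsigned shift)
{
	return demo_mulhdu(ticks, scale) << shift;
}
#endif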
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}
int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return 0;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}
void read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;

	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);
	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}
/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
	u64 t2x, stamp_xsec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* XXX this assumes clock->shift == 22 */
	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
	t2x = (u64) clock->mult * 4611686018ULL;
	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(stamp_xsec, 1000000000);
	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(clock->cycle_last, stamp_xsec, t2x);
}
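
/*
 * Illustrative sketch, not part of this file: where the 4611686018
 * constant above comes from.  clock->mult is ns-per-tick scaled by
 * 2^shift (assumed to be 22), while tb_to_xs wants xsec-per-tick as a
 * 0.64 fraction.  With 2^20 xsec per second the correction factor is
 * 2^(64 + 20 - 22) / 1e9 = 2^62 / 1e9 ~= 4611686018.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>

static uint64_t demo_mult_to_tb_to_xs(uint32_t mult)
{
	const uint64_t t = (1ULL << 62) / 1000000000ULL;	/* 4611686018 */

	return (uint64_t)mult * t;
}
#endif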
void update_vsyscall_tz(void)
{
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
	smp_mb();
	++vdso_data->tb_update_count;
}
static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

	if (clocksource_register(clock)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void __init setup_clockevent_multiplier(unsigned long hz)
{
	u64 mult, shift = 32;

	while (1) {
		mult = div_sc(hz, NSEC_PER_SEC, shift);
		if (mult && (mult >> 32UL) == 0UL)
			break;

		shift--;
	}

	decrementer_clockevent.shift = shift;
	decrementer_clockevent.mult = mult;
}
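
/*
 * Illustrative sketch, not part of this file: with the mult/shift pair
 * chosen above, mult ~= (hz << shift) / NSEC_PER_SEC, so the generic
 * clockevents code can convert a nanosecond delta to decrementer ticks
 * as ticks = (ns * mult) >> shift (and clockevent_delta2ns() inverts
 * this).  The widest shift that keeps mult in 32 bits preserves the
 * most precision.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>

static uint64_t demo_ns_to_ticks(uint64_t ns, uint32_t mult, unsigned shift)
{
	return (ns * mult) >> shift;	/* caller must keep ns * mult in 64 bits */
}
#endif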
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
	       dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}
static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	setup_clockevent_multiplier(ppc_tb_freq);
	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}
void secondary_cpu_time_init(void)
{
	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here ! */
	register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Register the clocksource, if we're not running on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		clocksource_init();

	init_decrementer_clockevent();
}
#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define days_in_month(a) 	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
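
/*
 * Illustrative sanity check, not part of this file: 1 Jan 1970 was a
 * Thursday.  Plugging tm_year = 1970, tm_mon = 1, tm_mday = 1 into the
 * formula above gives lastYear = 1969 and
 * leapsToDate = 492 - 19 + 4 = 477, so day % 7 == 4, matching the
 * 0 == Sunday convention used by tm_wday.
 */
#if 0	/* standalone demonstration only */
#include <assert.h>

static void demo_gregorian_check(void)
{
	long day = 1969L * 365 + 477 + 0 + 1;	/* no leap day, January offset 0 */

	assert(day % 7 == 4);	/* Thursday */
}
#endif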
void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */

	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */

	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}
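
/*
 * Illustrative sketch, not part of this file: the returned value is the
 * 0.32 fixed-point ratio outscale/inscale, so mulhwu(x, factor) is
 * approximately x * outscale / inscale.  This is how tb_to_us is set up
 * in time_init(), with inscale = ppc_tb_freq and outscale = 1000000.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>

static uint32_t demo_mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);	/* high word of a*b */
}

/* convert timebase ticks to microseconds via the precomputed factor */
static uint32_t demo_ticks_to_us(uint32_t ticks, uint32_t tb_to_us_factor)
{
	return demo_mulhwu(ticks, tb_to_us_factor);
}
#endif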
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
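
/*
 * Illustrative cross-check, not part of this file: for any sample
 * input, the quotient produced by the long division above must satisfy
 * the usual division identity, which __int128 arithmetic can verify.
 */
#if 0	/* standalone demonstration only */
#include <stdint.h>
#include <assert.h>

static void demo_div128_check(void)
{
	unsigned __int128 dividend =
		((unsigned __int128)0x0123456789abcdefULL << 64) | 0x55aaULL;
	uint32_t divisor = 1000000007;
	unsigned __int128 q = dividend / divisor;

	/* same identity div128_by_32() preserves: q * divisor + r == dividend */
	assert(q * divisor + dividend % divisor == dividend);
}
#endif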
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}
static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

module_init(rtc_init);