/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * unambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(void);
static struct clocksource clocksource_rtc = {
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.shift        = 22,	/* update_vsyscall() below assumes shift == 22 */
	.mult         = 0,	/* To be filled in */
	.read         = rtc_read,
};

static cycle_t timebase_read(void);
static struct clocksource clocksource_timebase = {
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.shift        = 22,	/* update_vsyscall() below assumes shift == 22 */
	.mult         = 0,	/* To be filled in */
	.read         = timebase_read,
};
#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
	.name		= "decrementer",
	.mult		= 0,	/* To be filled in */
	.set_next_event	= decrementer_set_next_event,
	.set_mode	= decrementer_set_mode,
	.features	= CLOCK_EVT_FEAT_ONESHOT,
};

static DEFINE_PER_CPU(struct clock_event_device, decrementers);
void init_decrementer_clockevent(void);
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
void __init clocksource_init(void);
#endif
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
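/*
 * Illustrative note (not from the original source): an xsec is
 * 1/XSEC_PER_SEC = 2^-20 of a second, so 0.5 s == 524288 xsec.
 * SCALE_XSEC(xsec, max) maps a sub-second xsec count onto [0, max),
 * e.g. SCALE_XSEC(524288, 1000000) == 500000 on both the 64-bit
 * variant and the mulhwu-based 32-bit variant above.
 */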
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;		/* 0.64 fraction: timebase ticks to xsecs */
unsigned tb_to_us;	/* mulhwu() scale factor, set in time_init() */
#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
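/*
 * Worked example (illustrative, assuming HZ == 1024, SHIFT_HZ == 10):
 * one tick is ~1e9/1024 ns ~= 2^20 = 2^(30 - SHIFT_HZ) ns, so
 * last_tick_len ~= 2^(30 - SHIFT_HZ + TICKLEN_SCALE), and shifting
 * left by TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + SHIFT_HZ lands
 * the value near 2^63, as the comment above says.
 */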
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
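/*
 * Illustrative sketch (not from the original source): each factor is
 * the 0.64 fixed-point value of <unit rate> / tb_ticks_per_sec, so a
 * tick count converts to that unit with one high multiply, e.g.
 *
 *	msecs = mulhdu(timebase_ticks, __cputime_msec_factor);
 *
 * which computes ticks * 1000 / tb_ticks_per_sec with no division
 * (mulhdu() returns the high 64 bits of the product; see the comment
 * above sched_clock() below).
 */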
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}
static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	run_posix_cpu_timers(current);
}
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = get_tb_or_rtc();
	p->purr = mfspr(SPRN_PURR);
	p->initialized = 1;
	local_irq_restore(flags);
}
/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}
/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	if (!pme->initialized)
		return;	/* this can happen in early boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0)
		account_steal_time(current, stolen);
	pme->tb = tb;
	pme->purr = purr;
}
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
	snapshot_purr();
}
void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			/* spin */;
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	++(vdso_data->tb_update_count);
}
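/*
 * Reader side of the tb_update_count protocol described above, as a
 * sketch (illustrative only -- the real reader lives in the userspace
 * VDSO, not in this file):
 */
#if 0
	u32 seq;
	u64 t2x, xsec;

	do {
		seq = vdso_data->tb_update_count;
		rmb();		/* read the count before the data */
		t2x = vdso_data->tb_to_xs;
		xsec = vdso_data->stamp_xsec;
		rmb();		/* re-read the count after the data */
	} while (seq != vdso_data->tb_update_count || (seq & 1));
#endif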
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	/* Make sure we only run on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff < tb_ticks_per_jiffy/25 ) {
			printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
					new_tb_ticks_per_jiffy, sign, tick_diff );
			tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
			tb_ticks_per_sec   = new_tb_ticks_per_sec;
			calc_cputime_factors();
			div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
			do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
			tb_to_xs = divres.result_low;
			do_gtod.varp->tb_to_xs = tb_to_xs;
			vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
			vdso_data->tb_to_xs = tb_to_xs;
		}
		else {
			printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
				"                   new tb_ticks_per_jiffy = %lu\n"
				"                   old tb_ticks_per_jiffy = %lu\n",
				new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;

	/* Called here as now we know accurate values for the timebase */
	clocksource_init();
	return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (almost always the new decrementer value will
 * be greater than the current hardware decrementer, so the hypervisor
 * call will not be needed)
 */
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(decrementers, cpu);

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions */
	set_dec(DECREMENTER_MAX);

	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);

	old_regs = set_irq_regs(regs);

	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	/*
	 * We cannot disable the decrementer, so in the period
	 * between this cpu's being marked offline in cpu_online_map
	 * and calling stop-self, it is taking timer interrupts.
	 * Avoid calling into the scheduler rebalancing code if this
	 * is the case.
	 */
	if (!cpu_is_offline(cpu))
		account_process_time(regs);

	if (evt->event_handler)
		evt->event_handler(evt);
	else
		evt->set_next_event(DECREMENTER_MAX, evt);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
		process_hvlpevents();
#endif

	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}

	set_irq_regs(old_regs);
}
void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;

	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		per_cpu(last_jiffy, i) = previous_tb;
	}
}
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
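/*
 * Worked example (illustrative): with tb_ticks_per_sec = 512000000,
 * a tick is 1e9/512e6 = 1.953125 ns.  time_init() below stores the
 * 64.64 value 1.953125 * 2^64 shifted right once so the integer part
 * is zero: tb_to_ns_scale = 0.9765625 * 2^64, tb_to_ns_shift = 1.
 * Then mulhdu(ticks, tb_to_ns_scale) << 1 == ticks * 1.953125 ns.
 */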
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}
		of_node_put(cpu);
	}

	return found;
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}
int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return 0;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}
unsigned long read_persistent_clock(void)
{
	struct rtc_time tm;
	static int first = 1;

	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time)
			return ppc_md.get_boot_time() - timezone_offset;
	}
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
/* clocksource code */
static cycle_t rtc_read(void)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(void)
{
	return (cycle_t)get_tb();
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
	u64 t2x, stamp_xsec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;

	/* XXX this assumes clock->shift == 22 */
	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
	t2x = (u64) clock->mult * 4611686018ULL;
	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(stamp_xsec, 1000000000);
	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(clock->cycle_last, stamp_xsec, t2x);
}
void update_vsyscall_tz(void)
{
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
	++vdso_data->tb_update_count;
}
void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

	if (clocksource_register(clock)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of_cpu(cpu);

	printk(KERN_ERR "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
	       dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}
void init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
					     decrementer_clockevent.shift);
	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns = 1000;

	register_decrementer_clockevent(cpu);
}
void secondary_cpu_time_init(void)
{
	/* FIXME: Should make an unrelated change to move the snapshot_timebase
	 * call here */
	register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;
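	/*
	 * Consistency check on N (illustrative): substituting
	 * TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + SHIFT_HZ into
	 * N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT gives
	 * N = 84 - 63 + 30 - SHIFT_HZ = 51 - SHIFT_HZ, which is why the
	 * dividend above uses (1ULL << 51) and the divisor carries the
	 * remaining 2^SHIFT_HZ as tb_ticks_per_jiffy << SHIFT_HZ.
	 */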
	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_jiffy;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Register the clocksource, if we're not running on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		clocksource_init();

	init_decrementer_clockevent();
}
#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
	       tm->tm_mday;

	tm->tm_wday = day % 7;
}
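/*
 * Worked example (illustrative): for 1 January 2000 (tm_year = 2000,
 * tm_mon = 1, tm_mday = 1): lastYear = 1999, leapsToDate =
 * 499 - 19 + 4 = 484, day = 0 + 1999*365 + 484 + 0 + 1 = 730120,
 * and 730120 % 7 == 6, i.e. Saturday -- which 2000-01-01 was.
 */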
void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
/* Auxiliary function to compute scaling factors */

/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured, but does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt = 0, tmp, err;

	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */
	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */
	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}
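/*
 * Usage sketch (illustrative): time_init() above computes
 *	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 * which makes mulhwu(ticks, tb_to_us) ~= ticks * 1e6 / ppc_tb_freq,
 * i.e. timebase ticks to microseconds.  E.g. with ppc_tb_freq =
 * 33333333, mlt converges on ~0.03 * 2^32 ~= 128849019, and
 * mulhwu(33333333, 128849019) ~= 1000000 us per second of ticks.
 */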
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
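/*
 * Usage sketch (illustrative): time_init() calls
 *	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
 * i.e. (1e9 * 2^64) / tb_ticks_per_sec.  With tb_ticks_per_sec =
 * 500000000 this yields res.result_high = 2, res.result_low = 0:
 * the 64.64 fixed-point value 2.0, i.e. 2 ns per timebase tick.
 */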
;