/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick;  /* timer cycles per tick */
static long halftick;   /* half a tick, slack for the catch-up loop below */

#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif
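
/* Hardware background (summary, not part of the original sources): the
 * PA-RISC interval timer is control register CR16.  mfctl(16) reads the
 * free-running cycle counter; mtctl(value, 16) sets the comparison value
 * that raises the timer interrupt when the counter reaches it.  All tick
 * arithmetic below is therefore in CR16 cycles.
 */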

irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        long now;
        long next_tick;
        int nticks;
        int cpu = smp_processor_id();

        profile_tick(CPU_PROFILING, regs);

        now = mfctl(16);
        /* initialize next_tick to time at last clocktick */
        next_tick = cpu_data[cpu].it_value;

        /* since time passes between the interrupt and the mfctl()
         * above, it is never true that last_tick + clocktick == now.  If we
         * never miss a clocktick, we could set next_tick = last_tick + clocktick
         * but maybe we'll miss ticks, hence the loop.
         *
         * Variables are *signed*.
         */

        nticks = 0;
        while ((next_tick - now) < halftick) {
                next_tick += clocktick;
                nticks++;
        }
        mtctl(next_tick, 16);
        cpu_data[cpu].it_value = next_tick;
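
        /* Worked example (illustrative numbers, not from this file): with
         * clocktick = 2,500,000 cycles and halftick = 1,250,000, an interrupt
         * serviced 6,000,000 cycles after the programmed it_value loops three
         * times (diffs -6.0M, -3.5M, -1.0M are all < halftick), so nticks = 3:
         * the current tick plus two missed ones, replayed by the loop below.
         */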

        while (nticks--) {
#ifdef CONFIG_SMP
                smp_do_timer(regs);
#else
                update_process_times(user_mode(regs));
#endif
                if (cpu == 0) {
                        write_seqlock(&xtime_lock);
                        do_timer(regs);
                        write_sequnlock(&xtime_lock);
                }
        }

        /* check soft power switch status */
        if (cpu == 0 && !atomic_read(&power_tasklet.count))
                tasklet_schedule(&power_tasklet);

        return IRQ_HANDLED;
}

unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        /* a nullified instruction (PSW N bit set) never executed;
         * report the instruction before it */
        if (regs->gr[0] & PSW_N)
                pc -= 4;

#ifdef CONFIG_SMP
        if (in_lock_functions(pc))
                pc = regs->gr[2];       /* return pointer lives in gr2 */
#endif

        return pc;
}
EXPORT_SYMBOL(profile_pc);

/*** converted from ia64 ***/
/*
 * Return the number of micro-seconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
        /*
         * FIXME: This won't work on smp because jiffies are updated by cpu 0.
         * Once parisc-linux learns the cr16 difference between processors,
         * this could be made to work.
         */
        long last_tick;
        long elapsed_cycles;

        /* it_value is the intended time of the next tick */
        last_tick = cpu_data[smp_processor_id()].it_value;

        /* Subtract one tick and account for possible difference between
         * when we expected the tick and when it actually arrived.
         */
        last_tick -= clocktick * (jiffies - wall_jiffies + 1);
        elapsed_cycles = mfctl(16) - last_tick;

        /* the precision of this math could be improved */
        return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
        return 0;       /* see the FIXME above: no per-cpu cr16 offsets yet */
#endif
}
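
/*
 * Unit check for the division above (hypothetical numbers): PAGE0->mem_10msec
 * is the number of CR16 cycles in 10 ms, so mem_10msec / 10000 is cycles per
 * microsecond.  On a 250 MHz CR16, for instance, mem_10msec = 2,500,000 and
 * the divisor is 250, turning elapsed_cycles directly into microseconds.
 */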

void
do_gettimeofday (struct timeval *tv)
{
        unsigned long flags, seq, usec, sec;

        do {
                seq = read_seqbegin_irqsave(&xtime_lock, flags);
                usec = gettimeoffset();
                sec = xtime.tv_sec;
                usec += (xtime.tv_nsec / 1000);
        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
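
        /* usec now holds gettimeoffset() plus xtime's sub-second part; when
         * ticks are pending the sum can exceed one second, so carry the
         * excess into sec before returning.
         */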
        while (usec >= 1000000) {
                usec -= 1000000;
                ++sec;
        }

        tv->tv_sec = sec;
        tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int
do_settimeofday (struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);
        {
                /*
                 * This is revolting. We need to set "xtime"
                 * correctly. However, the value in this location is
                 * the value at the most recent update of wall time.
                 * Discover what correction gettimeofday would have
                 * done, and then undo it!
                 */
                nsec -= gettimeoffset() * 1000;
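
                /* A subsequent gettimeofday() returns xtime plus
                 * gettimeoffset(), so storing tv minus the offset already
                 * accumulated makes a read at this instant come out to
                 * exactly tv.
                 */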

                wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
                wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

                set_normalized_timespec(&xtime, sec, nsec);
                set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
        }
        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
        return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/*
 * XXX: We can do better than this.
 * Returns nanoseconds
 */
unsigned long long sched_clock(void)
{
        return (unsigned long long)jiffies * (1000000000 / HZ);
}
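
/*
 * Note: this is jiffies-based, so the value only advances once per tick;
 * at HZ = 100 the granularity is 1000000000 / 100 = 10,000,000 ns (10 ms).
 */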

void __init time_init(void)
{
        unsigned long next_tick;
        static struct pdc_tod tod_data;

        clocktick = (100 * PAGE0->mem_10msec) / HZ;
        halftick = clocktick / 2;
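
        /* Worked example (illustrative, assuming a 250 MHz CR16): with
         * PAGE0->mem_10msec = 2,500,000 and HZ = 100, clocktick =
         * (100 * 2,500,000) / 100 = 2,500,000 cycles per tick and
         * halftick = 1,250,000.
         */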

        /* Setup clock interrupt timing */

        next_tick = mfctl(16);
        next_tick += clocktick;
        cpu_data[smp_processor_id()].it_value = next_tick;

        /* kick off Itimer (CR16) */
        mtctl(next_tick, 16);

        if (pdc_tod_read(&tod_data) == 0) {
                write_seqlock_irq(&xtime_lock);
                xtime.tv_sec = tod_data.tod_sec;
                xtime.tv_nsec = tod_data.tod_usec * 1000;
                set_normalized_timespec(&wall_to_monotonic,
                                        -xtime.tv_sec, -xtime.tv_nsec);
                write_sequnlock_irq(&xtime_lock);
        } else {
                printk(KERN_ERR "Error reading tod clock\n");
                xtime.tv_sec = 0;
                xtime.tv_nsec = 0;
        }
}