/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */
/* flag for whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}
struct clocksource *clock;
#ifdef CONFIG_GENERIC_TIME
/**
 * clocksource_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void clocksource_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 nsec;

	cycle_now = clocksource_read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = cyc2ns(clock, cycle_delta);
	timespec_add_ns(&xtime, nsec);

	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
	clock->raw_time.tv_nsec += nsec;
}
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
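
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller obtains the current wall-clock time with nanosecond resolution.
 * The function name below is made up for the example.
 *
 *	static void example_show_walltime(void)
 *	{
 *		struct timespec ts;
 *
 *		getnstimeofday(&ts);
 *		printk(KERN_INFO "wall time: %ld.%09ld\n",
 *		       ts.tv_sec, ts.tv_nsec);
 *	}
 */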
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec / 1000;
}
EXPORT_SYMBOL(do_gettimeofday);
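
/*
 * Usage sketch (illustrative only): the microsecond-resolution variant.
 * tv_usec is just tv_nsec / 1000 from getnstimeofday(), so new code
 * should call getnstimeofday() directly.
 *
 *	struct timeval tv;
 *
 *	do_gettimeofday(&tv);
 *	printk(KERN_INFO "wall time: %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
 */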
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	clocksource_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	update_xtime_cache(0);

	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
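
/*
 * Usage sketch (illustrative only): setting the clock to an absolute
 * value, e.g. from an RTC driver; the timestamp here is arbitrary.
 *
 *	struct timespec ts = { .tv_sec = 1230768000, .tv_nsec = 0 };
 *
 *	if (do_settimeofday(&ts))
 *		printk(KERN_WARNING "rejected: tv_nsec out of range\n");
 */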
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates the current time interval and initializes a new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new, *old;

	new = clocksource_get_next();

	if (clock == new)
		return;

	clocksource_forward_now();

	if (clocksource_enable(new))
		return;

	new->raw_time = clock->raw_time;
	old = clock;
	clock = new;
	clocksource_disable(old);

	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	tick_clock_notify();

	/*
	 * We're holding xtime lock and waking up klogd would deadlock
	 * us on enqueue. So no printing!
	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
	 */
}
#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }

#endif /* !CONFIG_GENERIC_TIME */
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;
	cycle_t cycle_now, cycle_delta;

	do {
		seq = read_seqbegin(&xtime_lock);

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;

		*ts = clock->raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
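
/*
 * Usage sketch (illustrative only): sampling the NTP-free raw clock
 * around a workload to measure elapsed time without NTP steering.
 * do_work() is a hypothetical placeholder.
 *
 *	struct timespec start, end, delta;
 *
 *	getrawmonotonic(&start);
 *	do_work();
 *	getrawmonotonic(&end);
 *	delta = timespec_sub(end, start);
 */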
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
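
/*
 * Usage sketch (illustrative only): callers such as the tick/hrtimer
 * code can gate the switch to high-resolution mode on this check:
 *
 *	if (!timekeeping_valid_for_hres())
 *		return;		// stay in low-resolution mode
 */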
/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery-backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Be sure to remove this once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}
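
/*
 * Implementation sketch (illustrative only): an architecture with a
 * battery-backed RTC overrides the weak symbol above, roughly like
 * this; mach_read_rtc_seconds() is a hypothetical helper.
 *
 *	unsigned long read_persistent_clock(void)
 *	{
 *		return mach_read_rtc_seconds();
 *	}
 */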
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_get_next();
	clocksource_enable(clock);
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	clocksource_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};
static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};
static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);

	if (!error)
		error = sysdev_register(&device_timer);

	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
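
/*
 * Worked example (illustrative only, assuming NTP_SCALE_SHIFT == 32 and
 * SHIFT_HZ == 8): an error of 2^20 nsec (~1 msec) is stored scaled as
 * 2^20 << 32 == 2^52, so error2 == 2^52 >> 38 == 2^14. The look-ahead
 * loop shifts error2 right by two per iteration and thus runs 8 times,
 * giving look_ahead == 8: the error is spread over about 2^8 == 256
 * ticks, i.e. roughly one second, matching the tuning comment above.
 */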
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1, 0 and 1;
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(NTP_SCALE_SHIFT - clock->shift);
}
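
/*
 * Worked example (illustrative only): if the scaled error lies between
 * interval and 4*interval, then after error >>= 2 it is at most
 * interval, so adj is simply +1 (or -1 on the negative side) and
 * clock->mult is nudged by a single step. Only larger errors fall
 * through to clocksource_bigadjust() for a shifted correction.
 */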
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt; must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		offset -= clock->cycle_interval;
		clock->cycle_last += clock->cycle_interval;

		clock->xtime_nsec += clock->xtime_interval;
		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		clock->raw_time.tv_nsec += clock->raw_interval;
		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
			clock->raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		clock->error += tick_length;
		clock->error -= clock->xtime_interval <<
				(NTP_SCALE_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in clocksource_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)clock->xtime_nsec < 0)) {
		s64 neg = -(s64)clock->xtime_nsec;
		clock->xtime_nsec = 0;
		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
	clock->error += clock->xtime_nsec <<
			(NTP_SCALE_SHIFT - clock->shift);

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}
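
/*
 * Caller sketch (illustrative only): update_wall_time() is driven from
 * the periodic tick with xtime_lock write-held, roughly:
 *
 *	write_seqlock(&xtime_lock);
 *	do_timer(1);	// increments jiffies, ends up in update_wall_time()
 *	write_sequnlock(&xtime_lock);
 *
 * (see tick_periodic() in kernel/time/tick-common.c.)
 */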
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		-(wall_to_monotonic.tv_sec + total_sleep_time),
		-wall_to_monotonic.tv_nsec);
}
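
/*
 * Usage sketch (illustrative only): recovering the wall-clock time at
 * which the system booted.
 *
 *	struct timespec boot;
 *
 *	getboottime(&boot);
 *	printk(KERN_INFO "booted at %ld.%09ld\n", boot.tv_sec, boot.tv_nsec);
 */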
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}
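
/*
 * Usage sketch (illustrative only): turning a monotonic timestamp into
 * a boot-based one that also counts time spent in suspend.
 *
 *	struct timespec ts;
 *
 *	ktime_get_ts(&ts);		// CLOCK_MONOTONIC
 *	monotonic_to_bootbased(&ts);	// now includes total_sleep_time
 */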
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);
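
/*
 * Usage sketch (illustrative only): a tick-granular timestamp, good
 * enough for e.g. filesystem inode times, that avoids touching the
 * clocksource hardware.
 *
 *	struct timespec now = current_kernel_time();
 */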