/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t	cycle_interval;
	/* Number of clock-shifted nanoseconds in one NTP interval. */
	u64	xtime_interval;
	/* Raw nanoseconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock-shifted nanosecond remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nanoseconds. */
	s64	ntp_error;
	/* Shift conversion between clock-shifted nanoseconds and
	 * ntp-shifted nanoseconds. */
	int	ntp_error_shift;
	/* NTP-adjusted clock multiplier */
	u32	mult;
};

struct timekeeper timekeeper;
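
/*
 * Illustration (hypothetical numbers, not from this source): a clocksource
 * converts a cycle delta to nanoseconds as
 *
 *	ns = (cycles * mult) >> shift;
 *
 * For a made-up 1 MHz counter with shift = 20 and mult = 1000 << 20,
 * 10000 cycles -> (10000 * 1048576000) >> 20 = 10000000 ns = 10 ms.
 * The timekeeper caches its own copy of mult so NTP can steer the clock
 * rate without modifying the clocksource itself.
 */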

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult value for the currently
	 * active clocksource. This value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}
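
/*
 * Worked example for the conversion above (same hypothetical 1 MHz
 * clocksource: mult = 1000 << 20, shift = 20) with HZ = 100, assuming
 * NTP_INTERVAL_LENGTH = NSEC_PER_SEC/HZ = 10000000 ns:
 *
 *	tmp = (10000000 << 20) + mult/2;
 *	do_div(tmp, mult);		// tmp = 10000 cycles
 *
 * giving cycle_interval = 10000, xtime_interval = 10000 * mult
 * (10 ms in shifted ns) and raw_interval = 10000000 ns.
 */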

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert the delta to nanoseconds using the NTP-adjusted mult: */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert the delta to nanoseconds using the raw, unadjusted mult: */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
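
/*
 * The two helpers above differ only in the multiplier passed to
 * clocksource_cyc2ns(): timekeeping_get_ns() uses the NTP-steered
 * timekeeper.mult, while timekeeping_get_ns_raw() uses the clocksource's
 * original mult. When NTP nudges timekeeper.mult (see timekeeping_adjust()
 * below), the same cycle delta yields slightly more or fewer nanoseconds
 * on the adjusted path; the raw path feeds CLOCK_MONOTONIC_RAW and is
 * never steered.
 */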

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
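
/*
 * Readers sample xtime and friends with the usual seqlock retry loop,
 * as used throughout this file:
 *
 *	unsigned long seq;
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		... read xtime, wall_to_monotonic, ...
 *	} while (read_seqretry(&xtime_lock, seq));
 *
 * Writers use write_seqlock_irqsave(&xtime_lock, flags) and the matching
 * write_sequnlock_irqrestore().
 */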

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
struct timespec raw_time;

/* flag indicating whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
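
/*
 * Note the identity used above: CLOCK_MONOTONIC = xtime + wall_to_monotonic.
 * Hypothetical example: if the wall clock reads 1000000000 s and the system
 * has been up 500 s, wall_to_monotonic is about -999999500 s and ktime_get()
 * returns roughly 500 s.
 */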

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
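
/*
 * Hypothetical example: stepping the wall clock forward by 100 s makes
 * ts_delta.tv_sec = 100, so wall_to_monotonic is moved back by 100 s.
 * The sum xtime + wall_to_monotonic, and with it CLOCK_MONOTONIC, is
 * unchanged: do_settimeofday() steps only the realtime clock.
 */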

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
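
/*
 * Userspace reaches this through clock_gettime(CLOCK_MONOTONIC_RAW, &ts),
 * e.g. to compare the local oscillator against an external reference
 * without NTP steering folded in.
 */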

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}

/* Default weak implementation; normally overridden by the architecture. */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Default weak implementation; normally overridden by the architecture. */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add(total_sleep_time, ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger than one NTP interval, we look ahead even
 * further to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value. This is optimized
 * for the most common adjustments of -1, 0 and 1; for other values
 * we do a bit more work via timekeeping_bigadjust().
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
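
/*
 * Magnitude check (hypothetical numbers): since ns = (cycles * mult) >> shift,
 * a mult step of +/-1 changes the clock rate by 1/mult in relative terms.
 * For the example mult of 1000 << 20 used earlier, that is roughly 1e-9,
 * i.e. about one part per billion per adjustment step.
 */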

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < timekeeper.cycle_interval << shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= timekeeper.cycle_interval << shift;
	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
	while (timekeeper.xtime_nsec >= nsecps) {
		timekeeper.xtime_nsec -= nsecps;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Accumulate raw time */
	raw_nsecs = timekeeper.raw_interval << shift;
	raw_nsecs += raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		raw_time.tv_sec += raw_secs;
	}
	raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	timekeeper.ntp_error += tick_length << shift;
	timekeeper.ntp_error -= timekeeper.xtime_interval <<
				(timekeeper.ntp_error_shift + shift);

	return offset;
}
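
/*
 * Sketch of the O(log) behaviour (hypothetical numbers): if a NO_HZ idle
 * period leaves offset = 1000 * cycle_interval, update_wall_time() below
 * starts with shift around 9 (512 intervals) and the loop consumes chunks
 * of 512, 256, 128, 64, 32 and 8 intervals, finishing in a handful of
 * iterations instead of accumulating 1000 ticks one by one.
 */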

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval << shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}

	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* update the vsyscall gtod copy of the time */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
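
/*
 * Hypothetical example: if the system booted at wall time T0, then
 * wall_to_monotonic started at -T0, and each suspend moves wall_to_monotonic
 * down while total_sleep_time grows by the same amount. Their sum stays at
 * -T0 (modulo settimeofday), so negating it here recovers the boot time T0.
 */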

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}

struct timespec __get_wall_to_monotonic(void)
{
	return wall_to_monotonic;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}