timekeeping: Increase granularity of read_persistent_clock()
[linux-2.6/btrfs-unstable.git] / kernel / time / timekeeping.c
/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64	xtime_interval;
	/* Raw nano seconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64	ntp_error;

	/* Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds. */
	int	ntp_error_shift;

	/* NTP adjusted clock multiplier */
	u32	mult;
};

struct timekeeper timekeeper;
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}
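/*
 * Worked example (illustrative only, all values assumed): for a
 * hypothetical 1 MHz clocksource with mult = 1000 and shift = 0, one
 * cycle is (1 * 1000) >> 0 = 1000 ns.  With an NTP interval of 1 ms
 * (NTP_INTERVAL_LENGTH = 1000000 ns at HZ=1000), the rounded ns -> cycle
 * conversion above yields:
 *
 *	cycle_interval = ((1000000 << 0) + 1000/2) / 1000 = 1000 cycles
 *	xtime_interval = 1000 * 1000                       = 1000000 shifted ns
 *	raw_interval   = (1000 * 1000) >> 0                = 1000000 ns
 *
 * so each accumulated interval advances the clock by exactly 1 ms here;
 * on real hardware any rounding remainder is absorbed via ntp_error in
 * update_wall_time()/timekeeping_adjust() below.
 */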
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using ntp adjusted mult. */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}
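/*
 * For reference: clocksource_cyc2ns() is essentially
 * ((u64) cycles * mult) >> shift, so timekeeping_get_ns() above scales
 * the cycle delta with the NTP-adjusted multiplier, while
 * timekeeping_get_ns_raw() below deliberately uses the clocksource's
 * original mult/shift, keeping CLOCK_MONOTONIC_RAW free of NTP steering.
 */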
static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using the original mult. */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;
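/*
 * Sketch of how these offsets relate (illustrative summary, not part of
 * the original file):
 *
 *	monotonic time	= xtime + wall_to_monotonic
 *	boot-based time	= monotonic time + total_sleep_time
 *	boot time	= -(wall_to_monotonic + total_sleep_time)
 *
 * which is why do_settimeofday() moves wall_to_monotonic in the opposite
 * direction of any change applied to xtime, and why getboottime() and
 * monotonic_to_bootbased() fold in total_sleep_time.
 */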
/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, timekeeper.clock);
}
#ifdef CONFIG_GENERIC_TIME

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	update_xtime_cache(0);

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, timekeeper.clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}
/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}
#else /* GENERIC_TIME */

static inline void timekeeping_forward_now(void) { }

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#endif /* !GENERIC_TIME */

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
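/*
 * Minimal sketch of an architecture override (hypothetical, helper names
 * assumed): an arch whose battery backed clock can be read at sub-second
 * resolution would supply its own non-weak definition, e.g.
 *
 *	void read_persistent_clock(struct timespec *ts)
 *	{
 *		ts->tv_sec  = my_rtc_read_seconds();      // assumed helper
 *		ts->tv_nsec = my_rtc_read_nanoseconds();  // assumed helper
 *	}
 *
 * Arches without such hardware keep the weak stub above, which makes
 * timekeeping_init() start at the epoch and leaves suspend/resume sleep
 * accounting at whole-second (or zero) granularity.
 */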
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now;

	read_persistent_clock(&now);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add_safe(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add_safe(total_sleep_time, ts);
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
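/*
 * Worked example (illustrative, values assumed): if NTP says each tick
 * should be slightly longer than the clocksource currently accumulates,
 * ntp_error grows positive until it exceeds one cycle_interval (in ntp
 * shifted ns).  The common case above then applies adj = +1: mult grows
 * by one, xtime_interval grows by cycle_interval (the extra shifted ns
 * that mult+1 contributes per interval), and the change is charged back
 * against ntp_error so the correction converges instead of oscillating.
 * Errors too large for a +/-1 step fall through to timekeeping_bigadjust(),
 * which picks a power-of-two sized adjustment.
 */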
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	u64 nsecs;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;
#ifdef CONFIG_GENERIC_TIME
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = timekeeper.cycle_interval;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= timekeeper.cycle_interval) {
		u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;

		/* accumulate one interval */
		offset -= timekeeper.cycle_interval;
		clock->cycle_last += timekeeper.cycle_interval;

		timekeeper.xtime_nsec += timekeeper.xtime_interval;
		if (timekeeper.xtime_nsec >= nsecps) {
			timekeeper.xtime_nsec -= nsecps;
			xtime.tv_sec++;
			second_overflow();
		}

		raw_time.tv_nsec += timekeeper.raw_interval;
		if (raw_time.tv_nsec >= NSEC_PER_SEC) {
			raw_time.tv_nsec -= NSEC_PER_SEC;
			raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		timekeeper.ntp_error += tick_length;
		timekeeper.ntp_error -= timekeeper.xtime_interval <<
					timekeeper.ntp_error_shift;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}

	/* store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
	update_xtime_cache(nsecs);

	/* update the vsyscall gtod copy of the new time */
	update_vsyscall(&xtime, timekeeper.clock);
}
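/*
 * Note on the shifted-nanosecond bookkeeping above (illustrative numbers
 * assumed): with shift = 10, one unit of xtime_nsec is 1/1024 ns, so an
 * xtime_interval that is not a whole number of nanoseconds accumulates
 * without loss; e.g. an interval worth 999999.25 ns is carried as
 * 999999.25 * 1024 = 1023999232 shifted ns.  Only when xtime.tv_nsec is
 * rebuilt at the end of update_wall_time() is the value rounded up, with
 * the leftover fraction folded into ntp_error.
 */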
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime;

	boottime = timespec_add_safe(wall_to_monotonic, total_sleep_time);
	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add_safe(*ts, total_sleep_time);
}
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);


struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);