/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *  adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *      Introduced error checking for many cases in adjtimex().
 *      Updated NTP code according to technical memorandum Jan '96
 *      "A Kernel Model for Precision Timekeeping" by Dave Mills
 *      Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *      (Even though the technical memorandum forbids it)
 * 2004-07-14    Christoph Lameter
 *      Added getnstimeofday to allow the posix timer functions to return
 *      with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"

/*
 * The timezone where the local system is located.  Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
        time_t i = (time_t)ktime_get_real_seconds();

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
        compat_time_t i;

        i = (compat_time_t)ktime_get_real_seconds();

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
#endif

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}

/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec64_valid(tv))
                return -EINVAL;

        error = security_settime64(tv, tz);
        if (error)
                return error;

        if (tz) {
                /* Verify we're within the +-15 hrs range */
                if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
                        return -EINVAL;

                sys_tz = *tz;
                update_vsyscall_tz();

                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                timekeeping_warp_clock();
                }
        }
        if (tv)
                return do_settimeofday64(tv);
        return 0;
}
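
/*
 * Illustrative sketch (not part of the original file): a boot script that
 * only wants to record the timezone can invoke the syscall with a NULL
 * time value, e.g. from userspace:
 *
 *      struct timezone tz = { .tz_minuteswest = 5 * 60, .tz_dsttime = 0 };
 *      settimeofday(NULL, &tz);
 *
 * This takes the tz-only path above: sys_tz is updated, and on the very
 * first such call timekeeping_warp_clock() shifts a CMOS clock that had
 * been running in local time onto UTC.  A tz_minuteswest outside the
 * +-15 hour range is rejected with -EINVAL before any state is touched.
 */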

SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;

                if (!timeval_valid(&user_tv))
                        return -EINVAL;

                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        if (tv) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (compat_put_timeval(&ktv, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (compat_get_timeval(&user_tv, tv))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
        struct timex txc;               /* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}
#endif

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
               HZ_TO_MSEC_SHR32;
# else
        return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
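
/*
 * Worked example (illustrative, not from the original source): with the
 * common configurations the first branch applies and the conversion is a
 * plain multiply, e.g.
 *
 *      jiffies_to_msecs(250) == 250    with HZ == 1000 (1 ms per tick)
 *      jiffies_to_msecs(250) == 1000   with HZ == 250  (4 ms per tick)
 *      jiffies_to_msecs(250) == 2500   with HZ == 100  (10 ms per tick)
 */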

unsigned int jiffies_to_usecs(const unsigned long j)
{
        /*
         * HZ usually doesn't go much further than MSEC_PER_SEC.
         * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
         */
        BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
        return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /* Avoid division in the common cases 1 ns and 1 s. */
        if (gran == 1) {
                /* nothing */
        } else if (gran == NSEC_PER_SEC) {
                t.tv_nsec = 0;
        } else if (gran > 1 && gran < NSEC_PER_SEC) {
                t.tv_nsec -= t.tv_nsec % gran;
        } else {
                WARN(1, "illegal file time granularity: %u", gran);
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);
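
/*
 * Worked example (illustrative, not from the original source): truncating
 * to a 1 ms filesystem granularity rounds the nanosecond field down to the
 * containing millisecond:
 *
 *      struct timespec t = { .tv_sec = 10, .tv_nsec = 123456789 };
 *      t = timespec_trunc(t, 1000000);
 *      (t is now { 10, 123000000 })
 */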

/**
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601). The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
 * tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
                  const unsigned int day, const unsigned int hour,
                  const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;      /* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((time64_t)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours - midnight tomorrow handled here */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
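
/*
 * Worked examples (illustrative, not from the original source):
 *
 *      mktime64(1970, 1, 1, 0, 0, 0)     == 0
 *      mktime64(2009, 2, 13, 23, 31, 30) == 1234567890
 *
 * i.e. the Unix epoch itself maps to 0, and Fri, 13 Feb 2009 23:31:30 UTC
 * maps to the well-known timestamp 1234567890.
 */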

/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative!
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
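
/*
 * Worked example (illustrative, not from the original source): a negative
 * nanosecond part borrows from the seconds until 0 <= tv_nsec < NSEC_PER_SEC:
 *
 *      struct timespec ts;
 *      set_normalized_timespec(&ts, 5, -200000000);
 *      (ts is now { 4, 800000000 }, i.e. the same 4.8 s instant)
 */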

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
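
/*
 * Worked example (illustrative, not from the original source): negative
 * inputs follow the same normalization rule, only tv_sec goes negative:
 *
 *      ns_to_timespec(1500000000)  == {  1, 500000000 }
 *      ns_to_timespec(-1500000000) == { -2, 500000000 }   (-2 s + 0.5 s)
 */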

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);

struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
{
        struct timespec64 ts = ns_to_timespec64(nsec);
        struct __kernel_old_timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_kernel_old_timeval);

/**
 * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec64 variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec64 variable and
 * normalize to the timespec64 storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative!
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
        struct timespec64 ts;
        s32 rem;

        if (!nsec)
                return (struct timespec64) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);

/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:  time in milliseconds
 *
 * conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   for the details see __msecs_to_jiffies()
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code, __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * the _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/linux/jiffies.h
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
        return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);
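
/*
 * Illustrative behaviour (not from the original source), assuming HZ == 100
 * so that the HZ <= MSEC_PER_SEC helper from include/linux/jiffies.h is
 * used (10 ms per tick, rounding up to the next tick):
 *
 *      msecs_to_jiffies(9)  == 1
 *      msecs_to_jiffies(10) == 1
 *      msecs_to_jiffies(11) == 2
 *      msecs_to_jiffies(-1) == MAX_JIFFY_OFFSET        (infinite timeout)
 */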

unsigned long __usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
        return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static unsigned long
__timespec64_to_jiffies(u64 sec, long nsec)
{
        nsec = nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return ((sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}

static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
{
        return __timespec64_to_jiffies((u64)sec, nsec);
}

unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
        return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}
EXPORT_SYMBOL(timespec64_to_jiffies);
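
/*
 * Illustrative behaviour (not from the original source), assuming HZ == 100
 * (TICK_NSEC == 10000000): whole seconds contribute sec * HZ jiffies, and
 * any sub-tick remainder is rounded up by the TICK_NSEC - 1 term, so even
 * a single nanosecond costs a full jiffy:
 *
 *      struct timespec64 ts = { .tv_sec = 0, .tv_nsec = 1 };
 *      timespec64_to_jiffies(&ts) == 1
 */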

void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);

/*
 * We could use a similar algorithm to timespec_to_jiffies (with a
 * different multiplier for usec instead of nsec). But this has a
 * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 * usec value, since it's not necessarily integral.
 *
 * We could instead round in the intermediate scaled representation
 * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 * perilous: the scaling introduces a small positive error, which
 * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 * units to the intermediate before shifting) leads to accidental
 * overflow and overestimates.
 *
 * At the cost of one additional multiplication by a constant, just
 * use the timespec implementation.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        return __timespec_to_jiffies(value->tv_sec,
                                     value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        return x * (USER_HZ / HZ);
# else
        return x / (HZ / USER_HZ);
# endif
#else
        return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
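
/*
 * Illustrative behaviour (not from the original source), assuming the usual
 * USER_HZ == 100: with HZ == 100 a jiffy and a clock tick coincide, so
 * jiffies_to_clock_t(x) == x; with HZ == 1000 (TICK_NSEC == 1000000) the
 * div_u64() branch scales by 1000000 / 10000000, e.g.
 *
 *      jiffies_to_clock_t(250) == 25
 */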

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ)==0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
        x = div_u64(x, HZ / USER_HZ);
# else
        /* Nothing to do */
# endif
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
        return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
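
/*
 * Worked example (illustrative, not from the original source), assuming the
 * usual USER_HZ == 100 so that NSEC_PER_SEC % USER_HZ == 0 and the first
 * branch divides by the 10 ms clock-tick length:
 *
 *      nsec_to_clock_t(2500000000ULL) == 250   (2.5 s of CPU time)
 */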

u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
        return (NSEC_PER_SEC / HZ) * j;
#else
        return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:  nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
        return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
        /* overflow after 292 years if HZ = 1024 */
        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * Generic case - optimized for cases where HZ is a multiple of 3.
         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
         */
        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
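
/*
 * Worked example (illustrative, not from the original source): with the
 * common-case HZ == 250, NSEC_PER_SEC / HZ == 4000000, so
 *
 *      nsecs_to_jiffies64(1000000000ULL) == 250
 *      nsecs_to_jiffies64(3999999ULL)    == 0  (truncates, no rounding up)
 */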

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:  nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);

/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                      const struct timespec64 rhs)
{
        struct timespec64 res;

        set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
                                  lhs.tv_nsec + rhs.tv_nsec);

        if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
                res.tv_sec = TIME64_MAX;
                res.tv_nsec = 0;
        }

        return res;
}
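
/*
 * Worked examples (illustrative, not from the original source): the nsec
 * fields are normalized as usual, and a tv_sec overflow saturates:
 *
 *      { 1, 700000000 } + { 2, 600000000 } == { 4, 300000000 }
 *      { TIME64_MAX, 0 } + { 1, 0 }        == { TIME64_MAX, 0 }
 */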

int get_timespec64(struct timespec64 *ts,
                   const struct __kernel_timespec __user *uts)
{
        struct __kernel_timespec kts;
        int ret;

        ret = copy_from_user(&kts, uts, sizeof(kts));
        if (ret)
                return -EFAULT;

        ts->tv_sec = kts.tv_sec;

        /* Zero out the padding for 32 bit systems or in compat mode */
        if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()))
                kts.tv_nsec &= 0xFFFFFFFFUL;

        ts->tv_nsec = kts.tv_nsec;

        return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);

int put_timespec64(const struct timespec64 *ts,
                   struct __kernel_timespec __user *uts)
{
        struct __kernel_timespec kts = {
                .tv_sec = ts->tv_sec,
                .tv_nsec = ts->tv_nsec
        };

        return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);

int __compat_get_timespec64(struct timespec64 *ts64,
                            const struct compat_timespec __user *cts)
{
        struct compat_timespec ts;
        int ret;

        ret = copy_from_user(&ts, cts, sizeof(ts));
        if (ret)
                return -EFAULT;

        ts64->tv_sec = ts.tv_sec;
        ts64->tv_nsec = ts.tv_nsec;

        return 0;
}

int __compat_put_timespec64(const struct timespec64 *ts64,
                            struct compat_timespec __user *cts)
{
        struct compat_timespec ts = {
                .tv_sec = ts64->tv_sec,
                .tv_nsec = ts64->tv_nsec
        };

        return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}

int compat_get_timespec64(struct timespec64 *ts, const void __user *uts)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
        else
                return __compat_get_timespec64(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_get_timespec64);

int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
        else
                return __compat_put_timespec64(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_put_timespec64);

int get_itimerspec64(struct itimerspec64 *it,
                     const struct __kernel_itimerspec __user *uit)
{
        int ret;

        ret = get_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = get_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
                     struct __kernel_itimerspec __user *uit)
{
        int ret;

        ret = put_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = put_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);

int get_compat_itimerspec64(struct itimerspec64 *its,
                            const struct compat_itimerspec __user *uits)
{
        if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
            __compat_get_timespec64(&its->it_value, &uits->it_value))
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(get_compat_itimerspec64);

int put_compat_itimerspec64(const struct itimerspec64 *its,
                            struct compat_itimerspec __user *uits)
{
        if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
            __compat_put_timespec64(&its->it_value, &uits->it_value))
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(put_compat_itimerspec64);