/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.40 2008/04/02 14:16:16 sephe Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/kern_syscall.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/msgport2.h>
#include <sys/thread2.h>
/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
struct timezone tz;

static int	nanosleep1(struct timespec *rqt, struct timespec *rmt);
static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

static int	nanowait;
static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");
static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	return (0);
}
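/*
 * Clamping example (illustrative, not from the original sources): at
 * securelevel > 1, a request to step the clock forward from t to t+5 s
 * is clamped to t+1 s, a backward step to earlier than one second
 * before the highest time yet seen is clamped to maxtime - 1 s, and a
 * second step within the same second as the previous one is rejected,
 * so a hostile time-setter can at worst make the clock "march
 * double-time".
 */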
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	int error = 0;

	switch(clock_id) {
	case CLOCK_REALTIME:
		nanotime(ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ats);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
/* ARGSUSED */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}
int
kern_clock_settime(clockid_t clock_id, struct timespec *ats)
{
	struct thread *td = curthread;
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);

	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(&atv);
	return (error);
}
/* ARGSUSED */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	return (kern_clock_settime(uap->clock_id, &ats));
}
int
kern_clock_getres(clockid_t clock_id, struct timespec *ts)
{
	int error;

	switch(clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
		ts->tv_sec = 0;
		ts->tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
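/*
 * Worked example (illustrative numbers, not from this file): with a
 * cputimer running at 1193182 Hz, 1000000000 / 1193182 truncates to
 * 838, so the reported resolution is 839 ns.  A timer faster than
 * 1 GHz would truncate to 0, which the +1 turns into the smallest
 * honest answer, 1 ns.
 */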
int
sys_clock_getres(struct clock_getres_args *uap)
{
	int error;
	struct timespec ts;

	error = kern_clock_getres(uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));

	return (error);
}
/*
 * This is a general helper function for nanosleep() (aka sleep() aka
 * usleep()).
 *
 * If there is less than one tick's worth of time left and
 * we haven't done a yield, or the number of microseconds remaining
 * is ridiculously low, do a yield.  This avoids having
 * to deal with systimer overheads when the system is under
 * heavy loads.  If we have done a yield already then use
 * a systimer and an uninterruptible thread wait.
 *
 * If there is more than a tick's worth of time left,
 * calculate the baseline ticks and use an interruptible
 * tsleep, then handle the fine-grained delay on the next
 * loop.  This usually results in two sleeps occurring, a long one
 * and a short one.
 */
static void
ns1_systimer(systimer_t info)
{
	lwkt_schedule(info->data);
}
static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;
	int tried_yield;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */
	tried_yield = 0;

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / tick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;

			if (tried_yield || tv.tv_usec < sleep_hard_us) {
				tried_yield = 0;
				uio_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
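/*
 * Example of the two-phase strategy above (illustrative): a 25 ms
 * request on a system with 10 ms ticks first does an interruptible
 * tsleep() of roughly two ticks, then re-evaluates the deadline and
 * finishes the residual few milliseconds with a oneshot systimer (or
 * a simple yield if the residue is under sleep_hard_us microseconds).
 */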
/* ARGSUSED */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * Copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}
/* ARGSUSED */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}
/* ARGSUSED */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = priv_check(td, PRIV_SETTIMEOFDAY)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}
static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}
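/*
 * Band selection example (hypothetical magnitudes): if the default
 * per-tick delta corresponds to 10 us, a residual ntp_delta of +3 us
 * is applied in full on the next tick; a +50 ms request below
 * ntp_big_delta slews at 10 us per tick; anything beyond ntp_big_delta
 * slews ten times faster until the residue drops back into the slower
 * bands.
 */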
void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
/* ARGSUSED */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = priv_check(td, PRIV_ADJTIME)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}
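/*
 * Conversion example (illustrative): delta = { .tv_sec = 1,
 * .tv_usec = 500000 } becomes ndelta = 1 * 1000000000 +
 * 500000 * 1000 = 1500000000 ns, i.e. a 1.5 second correction.
 */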
static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}
/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}
/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (priv_check(curthread, PRIV_ROOT))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	return (error);
}
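/*
 * Scaling example (illustrative): a permanent correction of +1000 ns
 * per second arrives as 1000 << 32.  With hz = 100, dividing by hz
 * stores 10 << 32, i.e. 10 shifted nanoseconds applied every tick;
 * reading the oid multiplies ntp_tick_permanent by hz to undo the
 * scaling.
 */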
SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");
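/*
 * Userland view (illustrative): these oids appear under kern.ntp, so
 * e.g. "sysctl kern.ntp.delta=1000000" (as root) requests a one-time
 * 1 ms correction, and reading kern.ntp.adjust reports the remaining
 * unapplied delta in nanoseconds.
 */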
/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	crit_exit();
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval)));
}
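/*
 * Conversion example (illustrative): if the real timer is armed to
 * fire at an absolute uptime of 105 s and getmicrouptime() reports
 * 100 s, the caller sees it_value = 5 s; if the deadline has already
 * passed, it_value is reported as zero rather than a negative delta.
 */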
/* ARGSUSED */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == NULL)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
	}
	crit_exit();
	return (0);
}
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		crit_enter();
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
				      realitexpire, p);
			crit_exit();
			return;
		}
		crit_exit();
	}
}
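/*
 * Compression example (illustrative): with a 10 ms it_interval, a
 * callout delayed by 35 ms walks it_value forward in 10 ms steps until
 * it exceeds the current uptime, so the process receives a single
 * SIGALRM for the whole delay instead of a burst of stacked signals.
 */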
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else {
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	}
	return (0);
}
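/*
 * Worked example (illustrative): it_value = {0 s, 300 us} decremented
 * by usec = 500 expires with a 200 us carry; the reload then sets
 * it_value = it_interval - 200 us, so the next expiration stays on the
 * original schedule instead of drifting by the overshoot.
 */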
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}
static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}
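/*
 * Normalization example (illustrative): a subtraction result of
 * { tv_sec = 1, tv_usec = -200000 } is fixed up to { 0, 800000 }, and
 * an addition result of { 0, 1300000 } becomes { 1, 300000 }.  One
 * pass suffices because callers only combine normalized timevals.
 */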
/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
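/*
 * Typical use (sketch only; "lastmsg" and the interval are the
 * caller's own, not part of this file):
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval onesec = { 1, 0 };
 *
 *	if (ratecheck(&lastmsg, &onesec))
 *		kprintf("something worth logging once per second\n");
 */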
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}