/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: src/sys/kern/kern_time.c,v 1.68.2.1 2002/10/01 08:00:41 bde Exp $
 * $DragonFly: src/sys/kern/kern_time.c,v 1.40 2008/04/02 14:16:16 sephe Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/sysunion.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>
struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
static int	nanosleep1(struct timespec *rqt,
			struct timespec *rmt);
static int	settime(struct timeval *);
static void	timevalfix(struct timeval *);

/*
 * Sub-tick sleeps shorter than this many microseconds are handled
 * with a simple yield instead of programming a one-shot systimer.
 */
static int	sleep_hard_us = 100;
SYSCTL_INT(_kern, OID_AUTO, sleep_hard_us, CTLFLAG_RW, &sleep_hard_us, 0, "");
static int
settime(struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
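	/*
	 * Example: with the highest time seen at 1000s, a request to
	 * step back to 990s is clamped to 999s (maxtime - 1); with the
	 * clock at 1000s, a request to step forward to 1010s is clamped
	 * to 1001s (tv1 + 1), and at most one such step per second is
	 * permitted.
	 */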
	if (securelevel > 1) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				kprintf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				crit_exit();
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				kprintf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	set_timeofday(&ts);
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));

	resettodr();
	return (0);
}
/* ARGSUSED */
int
sys_clock_gettime(struct clock_gettime_args *uap)
{
	struct timespec ats;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		nanotime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	case CLOCK_MONOTONIC:
		nanouptime(&ats);
		return (copyout(&ats, uap->tp, sizeof(ats)));
	default:
		return (EINVAL);
	}
}
/* ARGSUSED */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(td)) != 0)
		return (error);
	switch(uap->clock_id) {
	case CLOCK_REALTIME:
		if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
			return (error);
		if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
			return (EINVAL);
		/* XXX Don't convert nsec->usec and back */
		TIMESPEC_TO_TIMEVAL(&atv, &ats);
		error = settime(&atv);
		return (error);
	default:
		return (EINVAL);
	}
}
int
sys_clock_getres(struct clock_getres_args *uap)
{
	struct timespec ts;

	switch(uap->clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		/*
		 * Round up the result of the division cheaply
		 * by adding 1.  Rounding up is especially important
		 * if rounding down would give 0.  Perfect rounding
		 * is unimportant.
		 */
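		/*
		 * Worked example (illustrative): assuming a cputimer
		 * running at 1 MHz, 1000000000 / 1000000 + 1 = 1001,
		 * so the reported resolution is 1001 ns rather than an
		 * exact 1000 ns; the overshoot is harmless.
		 */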
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / sys_cputimer->freq + 1;
		return(copyout(&ts, uap->tp, sizeof(ts)));
	default:
		return(EINVAL);
	}
}
/*
 * nanosleep1()
 *
 *	This is a general helper function for nanosleep() (aka sleep() aka
 *	usleep()).
 *
 *	If there is less than one tick's worth of time left and
 *	we haven't done a yield, or the remaining microseconds is
 *	ridiculously low, do a yield.  This avoids having
 *	to deal with systimer overheads when the system is under
 *	heavy loads.  If we have done a yield already then use
 *	a systimer and an uninterruptible thread wait.
 *
 *	If there is more than a tick's worth of time left,
 *	calculate the baseline ticks and use an interruptible
 *	tsleep, then handle the fine-grained delay on the next
 *	loop.  This usually results in two sleeps occurring, a long one
 *	and a short one.
 */
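/*
 * Illustrative breakdown (assuming hz = 100, i.e. 10ms ticks): a 25ms
 * request first does an interruptible tsleep() of roughly two ticks,
 * then finishes the residual few milliseconds on the next loop pass
 * with a one-shot systimer or a yield.
 */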
/*
 * One-shot systimer callback: reschedule the thread sleeping in
 * nanosleep1().
 */
static void
ns1_systimer(systimer_t info)
{
	lwkt_schedule(info->data);
}
static int
nanosleep1(struct timespec *rqt, struct timespec *rmt)
{
	static int nanowait;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;
	int tried_yield;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	nanouptime(&ts);
	timespecadd(&ts, rqt);		/* ts = target timestamp compare */
	TIMESPEC_TO_TIMEVAL(&tv, rqt);	/* tv = sleep interval */
	tried_yield = 0;

	for (;;) {
		int ticks;
		struct systimer info;

		ticks = tv.tv_usec / tick;	/* approximate */

		if (tv.tv_sec == 0 && ticks == 0) {
			thread_t td = curthread;
			if (tried_yield || tv.tv_usec < sleep_hard_us) {
				tried_yield = 0;
				uio_yield();
			} else {
				crit_enter_quick(td);
				systimer_init_oneshot(&info, ns1_systimer,
						td, tv.tv_usec);
				lwkt_deschedule_self(td);
				crit_exit_quick(td);
				lwkt_switch();
				systimer_del(&info); /* make sure it's gone */
			}
			error = iscaught(td->td_lwp);
		} else if (tv.tv_sec == 0) {
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		} else {
			ticks = tvtohz_low(&tv); /* also handles overflow */
			error = tsleep(&nanowait, PCATCH, "nanslp", ticks);
		}
		nanouptime(&ts2);
		if (error && error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}
/* ARGSUSED */
int
sys_nanosleep(struct nanosleep_args *uap)
{
	int error;
	struct timespec rqt;
	struct timespec rmt;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	error = nanosleep1(&rqt, &rmt);

	/*
	 * copyout the residual if nanosleep was interrupted.
	 */
	if (error && uap->rmtp)
		error = copyout(&rmt, uap->rmtp, sizeof(rmt));
	return (error);
}
/* ARGSUSED */
int
sys_gettimeofday(struct gettimeofday_args *uap)
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}
/* ARGSUSED */
int
sys_settimeofday(struct settimeofday_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(td)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}
/*
 * Translate the pending ntp_delta into a per-tick slew rate.  Deltas
 * smaller than the default per-tick rate are applied in a single tick;
 * deltas beyond ntp_big_delta are slewed at ten times the default
 * per-tick rate, in the appropriate direction.
 */
static void
kern_adjtime_common(void)
{
	if ((ntp_delta >= 0 && ntp_delta < ntp_default_tick_delta) ||
	    (ntp_delta < 0 && ntp_delta > -ntp_default_tick_delta))
		ntp_tick_delta = ntp_delta;
	else if (ntp_delta > ntp_big_delta)
		ntp_tick_delta = 10 * ntp_default_tick_delta;
	else if (ntp_delta < -ntp_big_delta)
		ntp_tick_delta = -10 * ntp_default_tick_delta;
	else if (ntp_delta > 0)
		ntp_tick_delta = ntp_default_tick_delta;
	else
		ntp_tick_delta = -ntp_default_tick_delta;
}
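/*
 * kern_adjtime() installs a new one-time clock correction (in
 * nanoseconds), returning the previously outstanding correction in
 * *odelta.  The ntp_* globals are only touched from cpu 0, hence the
 * temporary lwkt_setcpu_self() migration below.
 */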
void
kern_adjtime(int64_t delta, int64_t *odelta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*odelta = ntp_delta;
	ntp_delta = delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
/*
 * Fetch the currently outstanding one-time correction, from cpu 0.
 */
static void
kern_get_ntp_delta(int64_t *delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	*delta = ntp_delta;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
/*
 * Add delta to the outstanding correction and recompute the slew rate.
 */
void
kern_reladjtime(int64_t delta)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_delta += delta;
	kern_adjtime_common();
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
/*
 * Set the permanent per-tick frequency correction.
 */
static void
kern_adjfreq(int64_t rate)
{
	int origcpu;

	if ((origcpu = mycpu->gd_cpuid) != 0)
		lwkt_setcpu_self(globaldata_find(0));

	crit_enter();
	ntp_tick_permanent = rate;
	crit_exit();

	if (origcpu != 0)
		lwkt_setcpu_self(globaldata_find(origcpu));
}
/* ARGSUSED */
int
sys_adjtime(struct adjtime_args *uap)
{
	struct thread *td = curthread;
	struct timeval atv;
	int64_t ndelta, odelta;
	int error;

	if ((error = suser(td)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);
	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
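	/*
	 * Example (illustrative): a request of { tv_sec = 1,
	 * tv_usec = 500000 } becomes 1 * 1000000000 + 500000 * 1000 =
	 * 1500000000 ns of total correction to slew in.
	 */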
	ndelta = (int64_t)atv.tv_sec * 1000000000 + atv.tv_usec * 1000;
	kern_adjtime(ndelta, &odelta);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000000;
		atv.tv_usec = odelta % 1000000000 / 1000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}
static int
sysctl_adjtime(SYSCTL_HANDLER_ARGS)
{
	int64_t delta;
	int error;

	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_reladjtime(delta);
	}

	if (req->oldptr)
		kern_get_ntp_delta(&delta);
	error = SYSCTL_OUT(req, &delta, sizeof(delta));
	return (error);
}
/*
 * delta is in nanoseconds.
 */
static int
sysctl_delta(SYSCTL_HANDLER_ARGS)
{
	int64_t delta, old_delta;
	int error;

	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &delta, sizeof(delta));
		if (error)
			return (error);
		kern_adjtime(delta, &old_delta);
	}

	if (req->oldptr != NULL)
		kern_get_ntp_delta(&old_delta);
	error = SYSCTL_OUT(req, &old_delta, sizeof(old_delta));
	return (error);
}
/*
 * frequency is in nanoseconds per second shifted left 32.
 * kern_adjfreq() needs it in nanoseconds per tick shifted left 32.
 */
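/*
 * Example (illustrative): at hz = 100, a requested skew of 1000 ns/s
 * (i.e. 1000 << 32 as passed through the sysctl) divides down to
 * 10 ns/tick << 32, which is what ntp_tick_permanent stores.
 */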
static int
sysctl_adjfreq(SYSCTL_HANDLER_ARGS)
{
	int64_t freqdelta;
	int error;

	if (req->newptr != NULL) {
		if (suser(curthread))
			return (EPERM);
		error = SYSCTL_IN(req, &freqdelta, sizeof(freqdelta));
		if (error)
			return (error);

		freqdelta /= hz;
		kern_adjfreq(freqdelta);
	}

	if (req->oldptr != NULL)
		freqdelta = ntp_tick_permanent * hz;
	error = SYSCTL_OUT(req, &freqdelta, sizeof(freqdelta));
	if (error)
		return (error);

	return (0);
}
SYSCTL_NODE(_kern, OID_AUTO, ntp, CTLFLAG_RW, 0, "NTP related controls");
SYSCTL_PROC(_kern_ntp, OID_AUTO, permanent,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjfreq, "Q", "permanent correction per second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, delta,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_delta, "Q", "one-time delta");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, big_delta, CTLFLAG_RD,
    &ntp_big_delta, sizeof(ntp_big_delta), "Q",
    "threshold for fast adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, tick_delta, CTLFLAG_RD,
    &ntp_tick_delta, sizeof(ntp_tick_delta), "LU",
    "per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, default_tick_delta, CTLFLAG_RD,
    &ntp_default_tick_delta, sizeof(ntp_default_tick_delta), "LU",
    "default per-tick adjustment");
SYSCTL_OPAQUE(_kern_ntp, OID_AUTO, next_leap_second, CTLFLAG_RW,
    &ntp_leap_second, sizeof(ntp_leap_second), "LU",
    "next leap second");
SYSCTL_INT(_kern_ntp, OID_AUTO, insert_leap_second, CTLFLAG_RW,
    &ntp_leap_insert, 0, "insert or remove leap second");
SYSCTL_PROC(_kern_ntp, OID_AUTO, adjust,
    CTLTYPE_QUAD|CTLFLAG_RW, 0, 0,
    sysctl_adjtime, "Q", "relative adjust for delta");
/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer's .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
/* ARGSUSED */
int
sys_getitimer(struct getitimer_args *uap)
{
	struct proc *p = curproc;
	struct timeval ctv;
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_timer[uap->which];
	}
	crit_exit();
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval)));
}
/* ARGSUSED */
int
sys_setitimer(struct setitimer_args *uap)
{
	struct itimerval aitv;
	struct timeval ctv;
	struct itimerval *itvp;
	struct proc *p = curproc;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = sys_getitimer((struct getitimer_args *)uap)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timevalisset(&aitv.it_value))
		timevalclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	crit_enter();
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_ithandle);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_ithandle,
			    tvtohz_high(&aitv.it_value), realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_timer[uap->which] = aitv;
	}
	crit_exit();
	return (0);
}
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz_high() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
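/*
 * Illustration: with a 10ms interval timer serviced 35ms late, the
 * for-loop below advances it_value by the interval until it passes
 * the current uptime, so the three missed expirations collapse into
 * the single SIGALRM sent at the top of the function.
 */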
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	ksignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		crit_enter();
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_ithandle, tvtohz_low(&ntv),
			    realitexpire, p);
			crit_exit();
			return;
		}
		crit_exit();
	}
}
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
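/*
 * Worked example (illustrative): it_value = 300us, it_interval =
 * 10000us, usec = 500.  The timer expires 200us "into" the next
 * period, so it is reloaded with 10000 - 200 = 9800us and the
 * long-term period stays at 10000us.
 */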
int
itimerdecr(struct itimerval *itp, int usec)
{
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}
/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
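/*
 * Typical usage sketch (hypothetical caller, not part of this file):
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval onesec = { 1, 0 };
 *
 *	if (ratecheck(&lastmsg, &onesec))
 *		kprintf("device wedged\n");
 *
 * which limits the diagnostic to at most one line per second.
 */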
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}
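/*
 * Typical usage sketch (hypothetical caller, not part of this file):
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttv, &curpps, 100))
 *		process_packet(m);	(under 100 pps: accept)
 *	else
 *		m_freem(m);		(over budget: drop)
 */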