/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/priv.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <sys/upmap.h>
#include <sys/sysctl.h>
#include <sys/kcollect.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif
#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks(void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);
/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif
static int sniff_enable = 1;
static int sniff_target = -1;
SYSCTL_INT(_kern, OID_AUTO, sniff_enable, CTLFLAG_RW, &sniff_enable, 0, "");
SYSCTL_INT(_kern, OID_AUTO, sniff_target, CTLFLAG_RW, &sniff_target, 0, "");
static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu;
	int error = 0;
	int root_error;
	size_t size = sizeof(struct kinfo_cputime);
	struct kinfo_cputime tmp;

	/*
	 * NOTE: For security reasons, only root can sniff %rip
	 */
	root_error = priv_check_cred(curthread->td_ucred, PRIV_ROOT, 0);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		tmp = cputime_percpu[cpu];
		if (root_error == 0) {
			tmp.cp_sample_pc =
				(int64_t)globaldata_find(cpu)->gd_sample_pc;
			tmp.cp_sample_sp =
				(int64_t)globaldata_find(cpu)->gd_sample_sp;
		}
		if ((error = SYSCTL_OUT(req, &tmp, size)) != 0)
			break;
	}

	if (root_error == 0) {
		if (sniff_enable) {
			int n = sniff_target;
			if (n < 0)
				smp_sniff();
			else if (n < ncpus)
				cpu_sniff(n);
		}
	}

	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");
static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu;
	int error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}

	error = SYSCTL_OUT(req, cpu_states, size);

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
static int
sysctl_cp_times(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[CPUSTATES] = {0};
	int cpu;
	int error;
	size_t size = sizeof(cpu_states);

	for (error = 0, cpu = 0; error == 0 && cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] = cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] = cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] = cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] = cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] = cputime_percpu[cpu].cp_idle;
		error = SYSCTL_OUT(req, cpu_states, size);
	}

	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_times, "LU", "per-CPU time statistics");
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this
 * with microuptime(), which is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 *
 * WARNING! time_second can backstep on time corrections.  Also, unlike
 *	    time_second, time_uptime is not a "real" time_t (seconds
 *	    since the Epoch) but seconds since booting.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' realtime in seconds */
time_t time_uptime;		/* read-only 'passive' uptime in seconds */
/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
struct hardtime {
	__uint32_t time_second;
	sysclock_t cpuclock_base;
};

#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static struct hardtime hardtime[BASETIME_ARYSIZE];
static volatile int basetime_index;
static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");
static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);
int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
struct spinlock ntp_spin;
/*
 * Finish initializing clock frequencies and start all clocks running.
 */
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	spin_init(&ntp_spin, "ntp");
	initclocks_pcpu();
	clocks_running = 1;
	if (kpmap) {
		kpmap->tsc_freq = tsc_frequency;
		kpmap->tick_freq = hz;
	}
}
/*
 * Called on a per-cpu basis from the idle thread bootstrap on each cpu
 * during SMP initialization.
 *
 * This routine is called concurrently during low-level SMP initialization
 * and may not block in any way.  Meaning, among other things, we can't
 * acquire any tokens.
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
		hardtime[0].time_second = gd->gd_time_seconds;
		hardtime[0].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();
	crit_exit();
}
/*
 * Called on a 10-second interval after the system is operational.
 * Return the collection data for USERPCT and install the data for
 * SYSTPCT and IDLEPCT.
 */
static uint64_t
collect_cputime_callback(int n)
{
	static long cpu_base[CPUSTATES];
	long cpu_states[CPUSTATES];
	long total;
	long acc;
	long lsb;

	bzero(cpu_states, sizeof(cpu_states));
	for (n = 0; n < ncpus; ++n) {
		cpu_states[CP_USER] += cputime_percpu[n].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[n].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[n].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[n].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[n].cp_idle;
	}

	acc = 0;
	for (n = 0; n < CPUSTATES; ++n) {
		total = cpu_states[n] - cpu_base[n];
		cpu_base[n] = cpu_states[n];
		cpu_states[n] = total;
		acc += total;
	}
	if (acc == 0)		/* prevent degenerate divide by 0 */
		acc = 1;
	lsb = acc / (10000 * 2);
	kcollect_setvalue(KCOLLECT_SYSTPCT,
			  (cpu_states[CP_SYS] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_IDLEPCT,
			  (cpu_states[CP_IDLE] + lsb) * 10000 / acc);
	kcollect_setvalue(KCOLLECT_INTRPCT,
			  (cpu_states[CP_INTR] + lsb) * 10000 / acc);
	return((cpu_states[CP_USER] + cpu_states[CP_NICE] + lsb) * 10000 / acc);
}
/*
 * This routine is called on just the BSP, just after SMP initialization
 * completes, to finish initializing any clocks that might contend/block
 * (e.g. on a token).  We can't do this in initclocks_pcpu() because
 * that function is called from the idle thread bootstrap for each cpu and
 * not allowed to block at all.
 */
static void
initclocks_other(void *dummy)
{
	struct globaldata *ogd = mycpu;
	struct globaldata *gd;
	int n;

	for (n = 0; n < ncpus; ++n) {
		lwkt_setcpu_self(globaldata_find(n));
		gd = mycpu;

		/*
		 * Use a non-queued periodic systimer to prevent multiple
		 * ticks from building up if the sysclock jumps forward
		 * (8254 gets reset).  The sysclock will never jump backwards.
		 * Our time sync is based on the actual sysclock, not the
		 * ticks count.
		 *
		 * Install statclock before hardclock to prevent statclock
		 * from misinterpreting gd_flags for tick assignment when
		 * they overlap.
		 */
		systimer_init_periodic_flags(&gd->gd_statclock, statclock,
					     NULL, stathz,
					     SYSTF_MSSYNC | SYSTF_FIRST);
		systimer_init_periodic_flags(&gd->gd_hardclock, hardclock,
					     NULL, hz, SYSTF_MSSYNC);
		/* XXX correct the frequency for scheduler / estcpu tests */
		systimer_init_periodic_flags(&gd->gd_schedclock, schedclock,
					     NULL, ESTCPUFREQ, SYSTF_MSSYNC);
	}
	lwkt_setcpu_self(ogd);

	/*
	 * Regular data collection
	 */
	kcollect_register(KCOLLECT_USERPCT, "user", collect_cputime_callback,
			  KCOLLECT_SCALE(KCOLLECT_USERPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SYSTPCT, "syst", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SYSTPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_IDLEPCT, "idle", NULL,
			  KCOLLECT_SCALE(KCOLLECT_IDLEPCT_FORMAT, 0));
}
SYSINIT(clocks2, SI_BOOT2_POST_SMP, SI_ORDER_ANY, initclocks_other, NULL);
/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	cpu_lfence();
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	spin_lock(&ntp_spin);
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	spin_unlock(&ntp_spin);

	crit_exit();
}
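/*
 * Worked example of the basetime calculation above: if the compensated
 * uptime (nanouptime) reads 100.500000000s when set_timeofday() is asked
 * to set the clock to 1700000000.200000000s, the new basetime becomes
 *
 *	1700000000.200000000 - 100.500000000 = 1699999899.700000000
 *
 * after the nanosecond borrow, so that basetime + uptime always yields
 * the requested real time without touching the monotonic gd_* fields.
 */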
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;
	int ni;

	if ((gd->gd_reqflags & RQF_IPIQ) == 0 && lwkt_need_ipiq_process(gd)) {
		/* Defer to doreti on passive IPIQ processing */
		need_ipiq();
	}

	/*
	 * We update the compensation base to calculate fine-grained time
	 * from the sys_cputimer on a per-cpu basis in order to avoid
	 * having to mess around with locks.  sys_cputimer is assumed to
	 * be consistent across all cpus.  CPU N copies the base state from
	 * CPU 0 using the same FIFO trick that we use for basetime (so we
	 * don't catch a CPU 0 update in the middle).
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, everything should resynch again immediately.
	 */
	if (gd->gd_cpuid == 0) {
		cputicks = info->time - gd->gd_cpuclock_base;
		if (cputicks >= sys_cputimer->freq) {
			cputicks /= sys_cputimer->freq;
			if (cputicks != 0 && cputicks != 1)
				kprintf("Warning: hardclock missed > 1 sec\n");
			gd->gd_time_seconds += cputicks;
			gd->gd_cpuclock_base += sys_cputimer->freq * cputicks;
			/* uncorrected monotonic 1-sec gran */
			time_uptime += cputicks;
		}
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		hardtime[ni].time_second = gd->gd_time_seconds;
		hardtime[ni].cpuclock_base = gd->gd_cpuclock_base;
	} else {
		ni = basetime_index;
		cpu_lfence();
		gd->gd_time_seconds = hardtime[ni].time_second;
		gd->gd_cpuclock_base = hardtime[ni].cpuclock_base;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * ntp adjustments only occur on cpu 0 and are protected by
		 * ntp_spin.  This spinlock virtually never conflicts.
		 */
		spin_lock(&ntp_spin);

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/*
				 * Negate ntp_tick_acc to avoid shifting
				 * the sign bit.
				 */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			++nbt->tv_sec;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			--nbt->tv_sec;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}
		spin_unlock(&ntp_spin);

		/************************************************************
		 *		LEAP SECOND CORRECTION			    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						nbt->tv_sec++;
						nts.tv_sec++;
					} else {
						nbt->tv_sec--;
						nts.tv_sec--;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as
			 * a per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;

		/*
		 * Update kpmap on each tick.  TS updates are integrated with
		 * fences and upticks allowing userland to read the data
		 * deterministically.
		 */
		if (kpmap) {
			int w;

			w = (kpmap->upticks + 1) & 1;
			getnanouptime(&kpmap->ts_uptime[w]);
			getnanotime(&kpmap->ts_realtime[w]);
			cpu_sfence();
			++kpmap->upticks;
			cpu_sfence();
		}
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * Rollup accumulated vmstats, copy-back for critical path checks.
	 */
	vmstats_rollup_cpu(gd);
	vfscache_rollup_cpu(gd);
	mycpu->gd_vmstats = vmstats;

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 *     a multi threaded environment is going to be weird at the
	 *     very least.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		if (p->p_upmap)
			++p->p_upmap->runticks;

		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
			need_user_resched();
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
			need_user_resched();
		}
		lwkt_reltoken(&p->p_token);
	}
}
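/*
 * The NTP accumulators above are 32.32 fixed point: the upper 32 bits
 * count whole nanoseconds and the lower 32 bits a fractional nanosecond.
 * For example, a slew of +1.5 nsec per tick would be represented as
 *
 *	nsec_adj = 3LL << 31;		(1.5 * 2^32)
 *
 * Each tick adds nsec_adj to nsec_acc; whenever |nsec_acc| reaches
 * 0x100000000LL (one whole nanosecond) the integer part (nsec_acc >> 32)
 * is folded into the pending basetime and only the fraction is carried
 * forward, so sub-nanosecond per-tick rates accumulate without rounding
 * loss.
 */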
/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
	globaldata_t gd = mycpu;
	thread_t td;
	struct proc *p;
	int bump;
	sysclock_t cv;
	sysclock_t scv;

	/*
	 * How big was our timeslice relative to the last time?  Calculate
	 * in microseconds.
	 *
	 * NOTE: Use of microuptime() is typically MPSAFE, but usually not
	 *	 during early boot.  Just use the systimer count to be nice
	 *	 to e.g. qemu.  The systimer has a better chance of being
	 *	 MPSAFE at early boot.
	 */
	cv = sys_cputimer->count();
	scv = gd->statint.gd_statcv;
	if (scv == 0) {
		bump = 1;
	} else {
		bump = (sys_cputimer->freq64_usec * (cv - scv)) >> 32;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	gd->statint.gd_statcv = cv;

#if 0
	stv = &gd->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		       (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;
#endif

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = gd->gd_intr_nesting_level;
#ifdef GPROF
		struct gmonparam *g = &_gmonparam;
		int i;
#endif

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract one here.
			 */
			--intr_nest;
		}

#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING ||
		    (gd->gd_reqflags & RQF_INTPEND)) {
			/*
			 * If we interrupted an interrupt thread, well,
			 * count it as interrupt time.
			 */
			td->td_iticks += bump;
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else if (gd->gd_flags & GDF_VIRTUSER) {
			/*
			 * The vkernel doesn't do a good job providing trap
			 * frames that we can test.  If the GDF_VIRTUSER
			 * flag is set we probably interrupted user mode.
			 *
			 * We also use this flag on the host when entering
			 * VMM mode.
			 */
			td->td_uticks += bump;

			/*
			 * Charge the time as appropriate
			 */
			if (p && p->p_nice > NZERO)
				cpu_time.cp_nice += bump;
			else
				cpu_time.cp_user += bump;
		} else {
			td->td_sticks += bump;
			if (td == &gd->gd_idlethread) {
				/*
				 * We want to count token contention as
				 * system time.  When token contention occurs
				 * the cpu may only be outside its critical
				 * section while switching through the idle
				 * thread.  In this situation, various flags
				 * will be set in gd_reqflags.
				 */
				if (gd->gd_reqflags & RQF_IDLECHECK_WK_MASK)
					cpu_time.cp_sys += bump;
				else
					cpu_time.cp_idle += bump;
			} else {
				/*
				 * System thread was running.
				 */
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
#undef IS_INTR_RUNNING
	}
}
#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */
static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif /* DEBUG_PCTRACK */
/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * HERE.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}
/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
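/*
 * Worked example: with hz = 100 (ustick = 10000 usec per tick), a
 * timeval of 0.025000s converts as
 *
 *	ticks = 0 * 100 + (25000 + 9999) / 10000 + 1 = 3 + 1 = 4
 *
 * The round-up plus the extra tick guarantee the timeout cannot fire
 * before the full 25ms has elapsed, even if a clock interrupt is
 * imminent when the timeout is armed.
 */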
int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
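/*
 * In short: use the _high variants when oversleeping is acceptable but
 * waking early is not (e.g. tsleep timeouts), and the _low variants when
 * returning late is the error.  With hz = 100, the same 25ms interval
 * from the example above converts to just 25000 / 10000 = 2 ticks here.
 */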
/*
 * Start profiling on a process.
 *
 * Caller must hold p->p_token();
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			psdiv = psratio;
			setstatclockrate(profhz);
			crit_exit();
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			psdiv = 1;
			setstatclockrate(stathz);
			crit_exit();
		}
#endif
	}
}
/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");
/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
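/*
 * freq64_usec is effectively (1000000 << 32) / freq, so the
 * multiply-and-shift above is a 32.32 fixed point usec conversion.
 * E.g. with a 3579545 Hz cputimer (the 8254-derived frequency) and
 * delta = 1789772 counts (half a second):
 *
 *	freq64_usec = (1000000 << 32) / 3579545  = ~1199864031
 *	tv_usec = (1199864031 * 1789772) >> 32   = ~499999
 *
 * i.e. the delta converts to just under 500000 microseconds without
 * any 64 bit division in the fast path.
 */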
void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}
void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	cpu_lfence();
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
/*
 * Get an approximate time_t.  It does not have to be accurate.  This
 * function is called only from KTR and can be called with the system in
 * any state so do not use a critical section or other complex operation
 * here.
 *
 * NOTE: This is not exactly synchronized with real time.  To do that we
 *	 would have to do what microtime does and check for a nanoseconds
 *	 overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}
void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;
	int ni;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
#endif
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
#endif
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	ni = basetime_index;
	cpu_lfence();
	bt = &basetime[ni];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}
/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() + tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return(-1);
}

/*
 * Compare the tsc against the passed target
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return(1);
		return(0);
	}
#endif
	return(-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0) {
		cpu_pause();
	}
}