4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 /* All Rights Reserved */
25 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
28 * Copyright (c) 2016 by Delphix. All rights reserved.
29 * Copyright (c) 2018, Carlos Neira <cneirabustos@gmail.com>
32 #include <sys/param.h>
33 #include <sys/t_lock.h>
34 #include <sys/types.h>
35 #include <sys/tuneable.h>
36 #include <sys/sysmacros.h>
37 #include <sys/systm.h>
38 #include <sys/cpuvar.h>
42 #include <sys/callo.h>
45 #include <sys/cmn_err.h>
47 #include <sys/vmsystm.h>
48 #include <sys/class.h>
50 #include <sys/debug.h>
51 #include <sys/vtrace.h>
53 #include <sys/atomic.h>
54 #include <sys/dumphdr.h>
55 #include <sys/archsystm.h>
56 #include <sys/fs/swapnode.h>
57 #include <sys/panic.h>
59 #include <sys/msacct.h>
60 #include <sys/mem_cage.h>
65 #include <sys/cyclic.h>
66 #include <sys/cpupart.h>
70 #include <sys/ddi_periodic.h>
71 #include <sys/random.h>
72 #include <sys/modctl.h>
78 #include <sys/timex.h>
79 #include <sys/inttypes.h>
81 #include <sys/sunddi.h>
82 #include <sys/clock_impl.h>
85 * clock() is called straight from the clock cyclic; see clock_init().
93 extern kcondvar_t fsflush_cv
;
94 extern sysinfo_t sysinfo
;
95 extern vminfo_t vminfo
;
96 extern int idleswtch
; /* flag set while idle in pswtch() */
97 extern hrtime_t
volatile devinfo_freeze
;
100 * high-precision avenrun values. These are needed to make the
101 * regular avenrun values accurate.
103 static uint64_t hp_avenrun
[3];
104 int avenrun
[3]; /* FSCALED average run queue lengths */
105 time_t time
; /* time in seconds since 1970 - for compatibility only */
107 static struct loadavg_s loadavg
;
109 * Phase/frequency-lock loop (PLL/FLL) definitions
111 * The following variables are read and set by the ntp_adjtime() system
114 * time_state shows the state of the system clock, with values defined
115 * in the timex.h header file.
117 * time_status shows the status of the system clock, with bits defined
118 * in the timex.h header file.
120 * time_offset is used by the PLL/FLL to adjust the system time in small
123 * time_constant determines the bandwidth or "stiffness" of the PLL.
125 * time_tolerance determines maximum frequency error or tolerance of the
126 * CPU clock oscillator and is a property of the architecture; however,
127 * in principle it could change as result of the presence of external
128 * discipline signals, for instance.
130 * time_precision is usually equal to the kernel tick variable; however,
131 * in cases where a precision clock counter or external clock is
132 * available, the resolution can be much less than this and depend on
133 * whether the external clock is working or not.
135 * time_maxerror is initialized by a ntp_adjtime() call and increased by
136 * the kernel once each second to reflect the maximum error bound
139 * time_esterror is set and read by the ntp_adjtime() call, but
140 * otherwise not used by the kernel.
142 int32_t time_state
= TIME_OK
; /* clock state */
143 int32_t time_status
= STA_UNSYNC
; /* clock status bits */
144 int32_t time_offset
= 0; /* time offset (us) */
145 int32_t time_constant
= 0; /* pll time constant */
146 int32_t time_tolerance
= MAXFREQ
; /* frequency tolerance (scaled ppm) */
147 int32_t time_precision
= 1; /* clock precision (us) */
148 int32_t time_maxerror
= MAXPHASE
; /* maximum error (us) */
149 int32_t time_esterror
= MAXPHASE
; /* estimated error (us) */
152 * The following variables establish the state of the PLL/FLL and the
153 * residual time and frequency offset of the local clock. The scale
154 * factors are defined in the timex.h header file.
156 * time_phase and time_freq are the phase increment and the frequency
157 * increment, respectively, of the kernel time variable.
159 * time_freq is set via ntp_adjtime() from a value stored in a file when
160 * the synchronization daemon is first started. Its value is retrieved
161 * via ntp_adjtime() and written to the file about once per hour by the
164 * time_adj is the adjustment added to the value of tick at each timer
165 * interrupt and is recomputed from time_phase and time_freq at each
168 * time_reftime is the second's portion of the system time at the last
169 * call to ntp_adjtime(). It is used to adjust the time_freq variable
170 * and to increase the time_maxerror as the time since last update
173 int32_t time_phase
= 0; /* phase offset (scaled us) */
174 int32_t time_freq
= 0; /* frequency offset (scaled ppm) */
175 int32_t time_adj
= 0; /* tick adjust (scaled 1 / hz) */
176 int32_t time_reftime
= 0; /* time at last adjustment (s) */
179 * The scale factors of the following variables are defined in the
180 * timex.h header file.
182 * pps_time contains the time at each calibration interval, as read by
183 * microtime(). pps_count counts the seconds of the calibration
184 * interval, the duration of which is nominally pps_shift in powers of
187 * pps_offset is the time offset produced by the time median filter
188 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
191 * pps_freq is the frequency offset produced by the frequency median
192 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
195 * pps_usec is latched from a high resolution counter or external clock
196 * at pps_time. Here we want the hardware counter contents only, not the
197 * contents plus the time_tv.usec as usual.
199 * pps_valid counts the number of seconds since the last PPS update. It
200 * is used as a watchdog timer to disable the PPS discipline should the
201 * PPS signal be lost.
203 * pps_glitch counts the number of seconds since the beginning of an
204 * offset burst more than tick/2 from current nominal offset. It is used
205 * mainly to suppress error bursts due to priority conflicts between the
206 * PPS interrupt and timer interrupt.
208 * pps_intcnt counts the calibration intervals for use in the interval-
209 * adaptation algorithm. It's just too complicated for words.
211 struct timeval pps_time
; /* kernel time at last interval */
212 int32_t pps_tf
[] = {0, 0, 0}; /* pps time offset median filter (us) */
213 int32_t pps_offset
= 0; /* pps time offset (us) */
214 int32_t pps_jitter
= MAXTIME
; /* time dispersion (jitter) (us) */
215 int32_t pps_ff
[] = {0, 0, 0}; /* pps frequency offset median filter */
216 int32_t pps_freq
= 0; /* frequency offset (scaled ppm) */
217 int32_t pps_stabil
= MAXFREQ
; /* frequency dispersion (scaled ppm) */
218 int32_t pps_usec
= 0; /* microsec counter at last interval */
219 int32_t pps_valid
= PPS_VALID
; /* pps signal watchdog counter */
220 int32_t pps_glitch
= 0; /* pps signal glitch counter */
221 int32_t pps_count
= 0; /* calibration interval counter (s) */
222 int32_t pps_shift
= PPS_SHIFT
; /* interval duration (s) (shift) */
223 int32_t pps_intcnt
= 0; /* intervals at current duration */
226 * PPS signal quality monitors
228 * pps_jitcnt counts the seconds that have been discarded because the
229 * jitter measured by the time median filter exceeds the limit MAXTIME
232 * pps_calcnt counts the frequency calibration intervals, which are
233 * variable from 4 s to 256 s.
235 * pps_errcnt counts the calibration intervals which have been discarded
236 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
237 * calibration interval jitter exceeds two ticks.
239 * pps_stbcnt counts the calibration intervals that have been discarded
240 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
242 int32_t pps_jitcnt
= 0; /* jitter limit exceeded */
243 int32_t pps_calcnt
= 0; /* calibration intervals */
244 int32_t pps_errcnt
= 0; /* calibration errors */
245 int32_t pps_stbcnt
= 0; /* stability limit exceeded */
250 * Hybrid lbolt implementation:
252 * The service historically provided by the lbolt and lbolt64 variables has
253 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
254 * original symbols removed from the system. The once clock driven variables are
255 * now implemented in an event driven fashion, backed by gethrtime() coarsed to
256 * the appropriate clock resolution. The default event driven implementation is
257 * complemented by a cyclic driven one, active only during periods of intense
258 * activity around the DDI lbolt routines, when a lbolt specific cyclic is
259 * reprogramed to fire at a clock tick interval to serve consumers of lbolt who
260 * rely on the original low cost of consulting a memory position.
262 * The implementation uses the number of calls to these routines and the
263 * frequency of these to determine when to transition from event to cyclic
264 * driven and vice-versa. These values are kept on a per CPU basis for
265 * scalability reasons and to prevent CPUs from constantly invalidating a single
266 * cache line when modifying a global variable. The transition from event to
267 * cyclic mode happens once the thresholds are crossed, and activity on any CPU
268 * can cause such transition.
270 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
271 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
272 * lbolt_cyclic_driven() according to the current mode. When the thresholds
273 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
274 * fire at a nsec_per_tick interval and increment an internal variable at
275 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
276 * will simply return the value of such variable. lbolt_cyclic() will attempt
277 * to shut itself off at each threshold interval (sampling period for calls
278 * to the DDI lbolt routines), and return to the event driven mode, but will
279 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
281 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
282 * for the cyclic subsystem to be intialized.
285 int64_t lbolt_bootstrap(void);
286 int64_t lbolt_event_driven(void);
287 int64_t lbolt_cyclic_driven(void);
288 int64_t (*lbolt_hybrid
)(void) = lbolt_bootstrap
;
289 uint_t
lbolt_ev_to_cyclic(caddr_t
, caddr_t
);
292 * lbolt's cyclic, installed by clock_init().
294 static void lbolt_cyclic(void);
297 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
298 * from switching back to event driven, once it reaches cyclic mode.
300 static boolean_t lbolt_cyc_only
= B_FALSE
;
303 * Cache aligned, per CPU structure with lbolt usage statistics.
305 static lbolt_cpu_t
*lb_cpu
;
308 * Single, cache aligned, structure with all the information required by
309 * the lbolt implementation.
311 lbolt_info_t
*lb_info
;
314 int one_sec
= 1; /* turned on once every second */
315 static int fsflushcnt
; /* counter for t_fsflushr */
316 int dosynctodr
= 1; /* patchable; enable/disable sync to TOD chip */
317 int tod_needsync
= 0; /* need to sync tod chip with software time */
318 static int tod_broken
= 0; /* clock chip doesn't work */
319 time_t boot_time
= 0; /* Boot time in seconds since 1970 */
320 cyclic_id_t clock_cyclic
; /* clock()'s cyclic_id */
321 cyclic_id_t deadman_cyclic
; /* deadman()'s cyclic_id */
323 extern void clock_tick_schedule(int);
325 static int lgrp_ticks
; /* counter to schedule lgrp load calcs */
328 * for tod fault detection
330 #define TOD_REF_FREQ ((longlong_t)(NANOSEC))
331 #define TOD_STALL_THRESHOLD (TOD_REF_FREQ * 3 / 2)
332 #define TOD_JUMP_THRESHOLD (TOD_REF_FREQ / 2)
333 #define TOD_FILTER_N 4
334 #define TOD_FILTER_SETTLE (4 * TOD_FILTER_N)
335 static enum tod_fault_type tod_faulted
= TOD_NOFAULT
;
337 static int tod_status_flag
= 0; /* used by tod_validate() */
339 static hrtime_t prev_set_tick
= 0; /* gethrtime() prior to tod_set() */
340 static time_t prev_set_tod
= 0; /* tv_sec value passed to tod_set() */
342 /* patchable via /etc/system */
343 int tod_validate_enable
= 1;
345 /* Diagnose/Limit messages about delay(9F) called from interrupt context */
346 int delay_from_interrupt_diagnose
= 0;
347 volatile uint32_t delay_from_interrupt_msg
= 20;
350 * On non-SPARC systems, TOD validation must be deferred until gethrtime
351 * returns non-zero values (after mach_clkinit's execution).
352 * On SPARC systems, it must be deferred until after hrtime_base
353 * and hres_last_tick are set (in the first invocation of hres_tick).
354 * Since in both cases the prerequisites occur before the invocation of
355 * tod_get() in clock(), the deferment is lifted there.
357 static boolean_t tod_validate_deferred
= B_TRUE
;
360 * tod_fault_table[] must be aligned with
361 * enum tod_fault_type in systm.h
363 static char *tod_fault_table
[] = {
364 "Reversed", /* TOD_REVERSED */
365 "Stalled", /* TOD_STALLED */
366 "Jumped", /* TOD_JUMPED */
367 "Changed in Clock Rate", /* TOD_RATECHANGED */
368 "Is Read-Only" /* TOD_RDONLY */
370 * no strings needed for TOD_NOFAULT
375 * test hook for tod broken detection in tod_validate
377 int tod_unit_test
= 0;
378 time_t tod_test_injector
;
380 #define CLOCK_ADJ_HIST_SIZE 4
382 static int adj_hist_entry
;
384 int64_t clock_adj_hist
[CLOCK_ADJ_HIST_SIZE
];
386 static void calcloadavg(int, uint64_t *);
387 static int genloadavg(struct loadavg_s
*);
388 static void loadavg_update();
390 void (*cpucaps_clock_callout
)() = NULL
;
392 extern clock_t clock_tick_proc_max
;
394 static int64_t deadman_counter
= 0;
396 static void recompute_load_averages(void);
397 static void onesec_time_adjustments(void);
398 static void onesec_waiters(void);
400 cyclic_id_t recompute_load_averages_cyclic
;
401 cyclic_id_t onesec_time_adjustments_cyclic
;
402 cyclic_id_t onesec_waiters_cyclic
;
407 extern void set_freemem();
416 * Make sure that 'freemem' do not drift too far from the truth
422 * Before the section which is repeated is executed, we do
423 * the time delta processing which occurs every clock tick
425 * There is additional processing which happens every time
426 * the nanosecond counter rolls over which is described
427 * below - see the section which begins with : if (one_sec)
429 * This section marks the beginning of the precision-kernel
432 * First, compute the phase adjustment. If the low-order bits
433 * (time_phase) of the update overflow, bump the higher order
434 * bits (time_update).
436 time_phase
+= time_adj
;
437 if (time_phase
<= -FINEUSEC
) {
438 ltemp
= -time_phase
/ SCALE_PHASE
;
439 time_phase
+= ltemp
* SCALE_PHASE
;
441 timedelta
-= ltemp
* (NANOSEC
/MICROSEC
);
443 } else if (time_phase
>= FINEUSEC
) {
444 ltemp
= time_phase
/ SCALE_PHASE
;
445 time_phase
-= ltemp
* SCALE_PHASE
;
447 timedelta
+= ltemp
* (NANOSEC
/MICROSEC
);
452 * End of precision-kernel code fragment which is processed
453 * every timer interrupt.
455 * Continue with the interrupt processing as scheduled.
458 clock_tick_schedule(one_sec
);
461 * Check for a callout that needs be called from the clock
462 * thread to support the membership protocol in a clustered
463 * system. Copy the function pointer so that we can reset
464 * this to NULL if needed.
466 if ((funcp
= cpucaps_clock_callout
) != NULL
)
471 recompute_load_averages(void)
479 pgcnt_t maxswap
, resv
, free
, avail
;
482 * Count the number of runnable threads and the number waiting
483 * for some form of I/O to complete -- gets added to
484 * sysinfo.waiting. To know the state of the system, must add
485 * wait counts from all CPUs. Also add up the per-partition
493 * First count the threads waiting on kpreempt queues in each
497 cpupart
= cp_list_head
;
499 uint_t cpupart_nrunnable
= cpupart
->cp_kp_queue
.disp_nrunnable
;
501 cpupart
->cp_updates
++;
502 nrunnable
+= cpupart_nrunnable
;
503 cpupart
->cp_nrunnable_cum
+= cpupart_nrunnable
;
504 cpupart
->cp_nrunning
= 0;
505 cpupart
->cp_nrunnable
= cpupart_nrunnable
;
506 } while ((cpupart
= cpupart
->cp_next
) != cp_list_head
);
509 /* Now count the per-CPU statistics. */
512 uint_t cpu_nrunnable
= cp
->cpu_disp
->disp_nrunnable
;
514 nrunnable
+= cpu_nrunnable
;
515 cpupart
= cp
->cpu_part
;
516 cpupart
->cp_nrunnable_cum
+= cpu_nrunnable
;
517 cpupart
->cp_nrunnable
+= cpu_nrunnable
;
519 * Update user, system, and idle cpu times.
521 cpupart
->cp_nrunning
++;
523 * w_io is used to update sysinfo.waiting during
524 * one_second processing below. Only gather w_io
525 * information when we walk the list of cpus if we're
526 * going to perform one_second processing.
528 w_io
+= CPU_STATS(cp
, sys
.iowait
);
530 if (cp
->cpu_flags
& CPU_EXISTS
) {
532 hrtime_t intracct
, intrused
;
533 const hrtime_t maxnsec
= 1000000000;
534 const int precision
= 100;
537 * Estimate interrupt load on this cpu each second.
538 * Computes cpu_intrload as %utilization (0-99).
541 /* add up interrupt time from all micro states */
542 for (intracct
= 0, i
= 0; i
< NCMSTATES
; i
++)
543 intracct
+= cp
->cpu_intracct
[i
];
544 scalehrtime(&intracct
);
546 /* compute nsec used in the past second */
547 intrused
= intracct
- cp
->cpu_intrlast
;
548 cp
->cpu_intrlast
= intracct
;
550 /* limit the value for safety (and the first pass) */
551 if (intrused
>= maxnsec
)
552 intrused
= maxnsec
- 1;
554 /* calculate %time in interrupt */
555 load
= (precision
* intrused
) / maxnsec
;
556 ASSERT(load
>= 0 && load
< precision
);
557 change
= cp
->cpu_intrload
- load
;
559 /* jump to new max, or decay the old max */
561 cp
->cpu_intrload
= load
;
563 cp
->cpu_intrload
-= (change
+ 3) / 4;
565 DTRACE_PROBE3(cpu_intrload
,
571 if (cp
->cpu_flags
& CPU_EXISTS
) {
573 * When updating the lgroup's load average,
574 * account for the thread running on the CPU.
575 * If the CPU is the current one, then we need
576 * to account for the underlying thread which
577 * got the clock interrupt not the thread that is
578 * handling the interrupt and caculating the load
586 * Account for the load average for this thread if
587 * it isn't the idle thread or it is on the interrupt
588 * stack and not the current CPU handling the clock
591 if ((t
&& t
!= cp
->cpu_idle_thread
) || (CPU
!= cp
&&
593 if (t
->t_lpl
== cp
->cpu_lpl
) {
598 * This is a remote thread, charge it
599 * against its home lgroup. Note that
600 * we notice that a thread is remote
601 * only if it's currently executing.
602 * This is a reasonable approximation,
603 * since queued remote threads are rare.
604 * Note also that if we didn't charge
605 * it to its home lgroup, remote
606 * execution would often make a system
607 * appear balanced even though it was
608 * not, and thread placement/migration
609 * would often not be done correctly.
611 lgrp_loadavg(t
->t_lpl
,
612 LGRP_LOADAVG_IN_THREAD_MAX
, 0);
615 lgrp_loadavg(cp
->cpu_lpl
,
616 cpu_nrunnable
* LGRP_LOADAVG_IN_THREAD_MAX
, 1);
618 } while ((cp
= cp
->cpu_next
) != cpu_list
);
620 vminfo
.freemem
+= freemem
;
621 avail
= MAX((spgcnt_t
)(availrmem
- swapfs_minfree
), 0);
623 maxswap
= k_anoninfo
.ani_mem_resv
+ k_anoninfo
.ani_max
+ avail
;
624 /* Update ani_free */
626 free
= k_anoninfo
.ani_free
+ avail
;
627 resv
= k_anoninfo
.ani_phys_resv
+ k_anoninfo
.ani_mem_resv
;
629 vminfo
.swap_resv
+= resv
;
630 /* number of reserved and allocated pages */
633 cmn_err(CE_WARN
, "clock: maxswap < free");
635 cmn_err(CE_WARN
, "clock: maxswap < resv");
637 vminfo
.swap_alloc
+= maxswap
- free
;
638 vminfo
.swap_avail
+= maxswap
- resv
;
639 vminfo
.swap_free
+= free
;
643 sysinfo
.runque
+= nrunnable
;
647 sysinfo
.swpque
+= nswapped
;
650 sysinfo
.waiting
+= w_io
;
654 * Wake up fsflush to write out DELWRI
655 * buffers, dirty pages and other cached
656 * administrative data, e.g. inodes.
658 if (--fsflushcnt
<= 0) {
659 fsflushcnt
= tune
.t_fsflushr
;
660 cv_signal(&fsflush_cv
);
664 calcloadavg(genloadavg(&loadavg
), hp_avenrun
);
665 for (i
= 0; i
< 3; i
++)
667 * At the moment avenrun[] can only hold 31
668 * bits of load average as it is a signed
669 * int in the API. We need to ensure that
670 * hp_avenrun[i] >> (16 - FSHIFT) will not be
671 * too large. If it is, we put the largest value
672 * that we can use into avenrun[i]. This is
673 * kludgey, but about all we can do until we
674 * avenrun[] is declared as an array of uint64[]
676 if (hp_avenrun
[i
] < ((uint64_t)1<<(31+16-FSHIFT
)))
677 avenrun
[i
] = (int32_t)(hp_avenrun
[i
] >>
680 avenrun
[i
] = 0x7fffffff;
682 cpupart
= cp_list_head
;
684 calcloadavg(genloadavg(&cpupart
->cp_loadavg
),
685 cpupart
->cp_hp_avenrun
);
686 } while ((cpupart
= cpupart
->cp_next
) != cp_list_head
);
692 onesec_time_adjustments(void)
697 clock_t now
= LBOLT_NO_ACCOUNT
; /* current tick */
701 * Beginning of precision-kernel code fragment executed
704 * On rollover of the second the phase adjustment to be
705 * used for the next second is calculated. Also, the
706 * maximum error is increased by the tolerance. If the
707 * PPS frequency discipline code is present, the phase is
708 * increased to compensate for the CPU clock oscillator
711 * On a 32-bit machine and given parameters in the timex.h
712 * header file, the maximum phase adjustment is +-512 ms
713 * and maximum frequency offset is (a tad less than)
714 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
716 time_maxerror
+= time_tolerance
/ SCALE_USEC
;
719 * Leap second processing. If in leap-insert state at
720 * the end of the day, the system clock is set back one
721 * second; if in leap-delete state, the system clock is
722 * set ahead one second. The microtime() routine or
723 * external clock driver will insure that reported time
724 * is always monotonic. The ugly divides should be
727 switch (time_state
) {
730 if (time_status
& STA_INS
)
731 time_state
= TIME_INS
;
732 else if (time_status
& STA_DEL
)
733 time_state
= TIME_DEL
;
737 if (hrestime
.tv_sec
% 86400 == 0) {
741 time_state
= TIME_OOP
;
746 if ((hrestime
.tv_sec
+ 1) % 86400 == 0) {
750 time_state
= TIME_WAIT
;
755 time_state
= TIME_WAIT
;
759 if (!(time_status
& (STA_INS
| STA_DEL
)))
760 time_state
= TIME_OK
;
766 * Compute the phase adjustment for the next second. In
767 * PLL mode, the offset is reduced by a fixed factor
768 * times the time constant. In FLL mode the offset is
769 * used directly. In either mode, the maximum phase
770 * adjustment for each second is clamped so as to spread
771 * the adjustment over not more than the number of
772 * seconds between updates.
774 if (time_offset
== 0)
776 else if (time_offset
< 0) {
777 lltemp
= -time_offset
;
778 if (!(time_status
& STA_FLL
)) {
779 if ((1 << time_constant
) >= SCALE_KG
)
780 lltemp
*= (1 << time_constant
) /
783 lltemp
= (lltemp
/ SCALE_KG
) >>
786 if (lltemp
> (MAXPHASE
/ MINSEC
) * SCALE_UPDATE
)
787 lltemp
= (MAXPHASE
/ MINSEC
) * SCALE_UPDATE
;
788 time_offset
+= lltemp
;
789 time_adj
= -(lltemp
* SCALE_PHASE
) / hz
/ SCALE_UPDATE
;
791 lltemp
= time_offset
;
792 if (!(time_status
& STA_FLL
)) {
793 if ((1 << time_constant
) >= SCALE_KG
)
794 lltemp
*= (1 << time_constant
) /
797 lltemp
= (lltemp
/ SCALE_KG
) >>
800 if (lltemp
> (MAXPHASE
/ MINSEC
) * SCALE_UPDATE
)
801 lltemp
= (MAXPHASE
/ MINSEC
) * SCALE_UPDATE
;
802 time_offset
-= lltemp
;
803 time_adj
= (lltemp
* SCALE_PHASE
) / hz
/ SCALE_UPDATE
;
807 * Compute the frequency estimate and additional phase
808 * adjustment due to frequency error for the next
809 * second. When the PPS signal is engaged, gnaw on the
810 * watchdog counter and update the frequency computed by
811 * the pll and the PPS signal.
814 if (pps_valid
== PPS_VALID
) {
815 pps_jitter
= MAXTIME
;
816 pps_stabil
= MAXFREQ
;
817 time_status
&= ~(STA_PPSSIGNAL
| STA_PPSJITTER
|
818 STA_PPSWANDER
| STA_PPSERROR
);
820 lltemp
= time_freq
+ pps_freq
;
823 time_adj
+= (lltemp
* SCALE_PHASE
) / (SCALE_USEC
* hz
);
826 * End of precision kernel-code fragment
828 * The section below should be modified if we are planning
829 * to use NTP for synchronization.
831 * Note: the clock synchronization code now assumes
833 * - if dosynctodr is 1, then compute the drift between
834 * the tod chip and software time and adjust one or
835 * the other depending on the circumstances
837 * - if dosynctodr is 0, then the tod chip is independent
838 * of the software clock and should not be adjusted,
839 * but allowed to free run. this allows NTP to sync.
840 * hrestime without any interference from the tod chip.
843 tod_validate_deferred
= B_FALSE
;
844 mutex_enter(&tod_lock
);
846 drift
= tod
.tv_sec
- hrestime
.tv_sec
;
847 absdrift
= (drift
>= 0) ? drift
: -drift
;
848 if (tod_needsync
|| absdrift
> 1) {
851 if (!tod_broken
&& tod_faulted
== TOD_NOFAULT
) {
854 membar_enter(); /* hrestime visible */
863 if (tod_needsync
|| !dosynctodr
) {
872 * If the drift is 2 seconds on the
873 * money, then the TOD is adjusting
874 * the clock; record that.
876 clock_adj_hist
[adj_hist_entry
++ %
877 CLOCK_ADJ_HIST_SIZE
] = now
;
879 timedelta
= (int64_t)drift
*NANOSEC
;
884 time
= gethrestime_sec(); /* for crusty old kmem readers */
885 mutex_exit(&tod_lock
);
894 * Wakeup the cageout thread waiters once per second.
902 * Some drivers still depend on this... XXX
904 cv_broadcast(&lbolt_cv
);
910 cyc_handler_t clk_hdlr
, lbolt_hdlr
,load_averages_hdlr
;
911 cyc_time_t clk_when
, lbolt_when
, load_averages_when
;
912 cyc_handler_t onesec_time_adjustments_hdlr
, onesec_waiters_hdlr
;
913 cyc_time_t onesec_time_adjustments_when
, onesec_waiters_when
;
918 * Setup handler and timer for the clock cyclic.
920 clk_hdlr
.cyh_func
= (cyc_func_t
)clock
;
921 clk_hdlr
.cyh_level
= CY_LOCK_LEVEL
;
922 clk_hdlr
.cyh_arg
= NULL
;
924 clk_when
.cyt_when
= 0;
925 clk_when
.cyt_interval
= nsec_per_tick
;
928 * Setup handler and timer for load_averages cyclic.
931 load_averages_hdlr
.cyh_func
= (cyc_func_t
)recompute_load_averages
;
932 load_averages_hdlr
.cyh_level
= CY_LOCK_LEVEL
;
933 load_averages_hdlr
.cyh_arg
= NULL
;
935 load_averages_when
.cyt_when
= 0;
936 load_averages_when
.cyt_interval
= SEC2NSEC(1);
939 * Setup handler and timer for onesec_time_adjustments cyclic.
942 onesec_time_adjustments_hdlr
.cyh_func
= (cyc_func_t
)onesec_time_adjustments
;
943 onesec_time_adjustments_hdlr
.cyh_level
= CY_LOCK_LEVEL
;
944 onesec_time_adjustments_hdlr
.cyh_arg
= NULL
;
946 onesec_time_adjustments_when
.cyt_when
= 0;
947 onesec_time_adjustments_when
.cyt_interval
= SEC2NSEC(1);
950 * Setup handler and timer for onesec_waiters cyclic.
953 onesec_waiters_hdlr
.cyh_func
= (cyc_func_t
)onesec_waiters
;
954 onesec_waiters_hdlr
.cyh_level
= CY_LOCK_LEVEL
;
955 onesec_waiters_hdlr
.cyh_arg
= NULL
;
957 onesec_waiters_when
.cyt_when
= 0;
958 onesec_waiters_when
.cyt_interval
= SEC2NSEC(1);
961 * The lbolt cyclic will be reprogramed to fire at a nsec_per_tick
962 * interval to satisfy performance needs of the DDI lbolt consumers.
963 * It is off by default.
965 lbolt_hdlr
.cyh_func
= (cyc_func_t
)lbolt_cyclic
;
966 lbolt_hdlr
.cyh_level
= CY_LOCK_LEVEL
;
967 lbolt_hdlr
.cyh_arg
= NULL
;
969 lbolt_when
.cyt_interval
= nsec_per_tick
;
972 * Allocate cache line aligned space for the per CPU lbolt data and
973 * lbolt info structures, and initialize them with their default
974 * values. Note that these structures are also cache line sized.
976 sz
= sizeof (lbolt_info_t
) + CPU_CACHE_COHERENCE_SIZE
;
977 buf
= (intptr_t)kmem_zalloc(sz
, KM_SLEEP
);
978 lb_info
= (lbolt_info_t
*)P2ROUNDUP(buf
, CPU_CACHE_COHERENCE_SIZE
);
980 if (hz
!= HZ_DEFAULT
)
981 lb_info
->lbi_thresh_interval
= LBOLT_THRESH_INTERVAL
*
984 lb_info
->lbi_thresh_interval
= LBOLT_THRESH_INTERVAL
;
986 lb_info
->lbi_thresh_calls
= LBOLT_THRESH_CALLS
;
988 sz
= (sizeof (lbolt_cpu_t
) * max_ncpus
) + CPU_CACHE_COHERENCE_SIZE
;
989 buf
= (intptr_t)kmem_zalloc(sz
, KM_SLEEP
);
990 lb_cpu
= (lbolt_cpu_t
*)P2ROUNDUP(buf
, CPU_CACHE_COHERENCE_SIZE
);
992 for (i
= 0; i
< max_ncpus
; i
++)
993 lb_cpu
[i
].lbc_counter
= lb_info
->lbi_thresh_calls
;
996 * Install the softint used to switch between event and cyclic driven
997 * lbolt. We use a soft interrupt to make sure the context of the
998 * cyclic reprogram call is safe.
1000 lbolt_softint_add();
1003 * Since the hybrid lbolt implementation is based on a hardware counter
1004 * that is reset at every hardware reboot and that we'd like to have
1005 * the lbolt value starting at zero after both a hardware and a fast
1006 * reboot, we calculate the number of clock ticks the system's been up
1007 * and store it in the lbi_debug_time field of the lbolt info structure.
1008 * The value of this field will be subtracted from lbolt before
1011 lb_info
->lbi_internal
= lb_info
->lbi_debug_time
=
1012 (gethrtime()/nsec_per_tick
);
1015 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
1016 * and lbolt_debug_{enter,return} use this value as an indication that
1017 * the initializaion above hasn't been completed. Setting lbolt_hybrid
1018 * to either lbolt_{cyclic,event}_driven here signals those code paths
1019 * that the lbolt related structures can be used.
1021 if (lbolt_cyc_only
) {
1022 lbolt_when
.cyt_when
= 0;
1023 lbolt_hybrid
= lbolt_cyclic_driven
;
1025 lbolt_when
.cyt_when
= CY_INFINITY
;
1026 lbolt_hybrid
= lbolt_event_driven
;
1030 * Grab cpu_lock and install all six cyclics.
1032 mutex_enter(&cpu_lock
);
1034 clock_cyclic
= cyclic_add(&clk_hdlr
, &clk_when
);
1035 lb_info
->id
.lbi_cyclic_id
= cyclic_add(&lbolt_hdlr
, &lbolt_when
);
1036 recompute_load_averages_cyclic
=
1037 cyclic_add(&load_averages_hdlr
, &load_averages_when
);
1038 onesec_time_adjustments_cyclic
=
1039 cyclic_add(&onesec_time_adjustments_hdlr
, &onesec_time_adjustments_when
);
1040 onesec_waiters_cyclic
= cyclic_add(&onesec_waiters_hdlr
, &onesec_waiters_when
);
1042 mutex_exit(&cpu_lock
);
1046 * Called before calcloadavg to get 10-sec moving loadavg together
1050 genloadavg(struct loadavg_s
*avgs
)
1053 int spos
; /* starting position */
1054 int cpos
; /* moving current position */
1059 /* 10-second snapshot, calculate first positon */
1060 if (avgs
->lg_len
== 0) {
1063 slen
= avgs
->lg_len
< S_MOVAVG_SZ
? avgs
->lg_len
: S_MOVAVG_SZ
;
1065 spos
= (avgs
->lg_cur
- 1) >= 0 ? avgs
->lg_cur
- 1 :
1066 S_LOADAVG_SZ
+ (avgs
->lg_cur
- 1);
1067 for (i
= hr_avg
= 0; i
< slen
; i
++) {
1068 cpos
= (spos
- i
) >= 0 ? spos
- i
: S_LOADAVG_SZ
+ (spos
- i
);
1069 hr_avg
+= avgs
->lg_loads
[cpos
];
1072 hr_avg
= hr_avg
/ slen
;
1073 avg
= hr_avg
/ (NANOSEC
/ LGRP_LOADAVG_IN_THREAD_MAX
);
1079 * Run every second from clock () to update the loadavg count available to the
1080 * system and cpu-partitions.
1082 * This works by sampling the previous usr, sys, wait time elapsed,
1083 * computing a delta, and adding that delta to the elapsed usr, sys,
1096 loadavg
.lg_total
= 0;
1099 * first pass totals up per-cpu statistics for system and cpu
1104 struct loadavg_s
*lavg
;
1106 lavg
= &cp
->cpu_loadavg
;
1108 cpu_total
= cp
->cpu_acct
[CMS_USER
] +
1109 cp
->cpu_acct
[CMS_SYSTEM
] + cp
->cpu_waitrq
;
1110 /* compute delta against last total */
1111 scalehrtime(&cpu_total
);
1112 prev
= (lavg
->lg_cur
- 1) >= 0 ? lavg
->lg_cur
- 1 :
1113 S_LOADAVG_SZ
+ (lavg
->lg_cur
- 1);
1114 if (lavg
->lg_loads
[prev
] <= 0) {
1115 lavg
->lg_loads
[lavg
->lg_cur
] = cpu_total
;
1118 lavg
->lg_loads
[lavg
->lg_cur
] = cpu_total
;
1119 cpu_total
= cpu_total
- lavg
->lg_loads
[prev
];
1124 lavg
->lg_cur
= (lavg
->lg_cur
+ 1) % S_LOADAVG_SZ
;
1125 lavg
->lg_len
= (lavg
->lg_len
+ 1) < S_LOADAVG_SZ
?
1126 lavg
->lg_len
+ 1 : S_LOADAVG_SZ
;
1128 loadavg
.lg_total
+= cpu_total
;
1129 cp
->cpu_part
->cp_loadavg
.lg_total
+= cpu_total
;
1131 } while ((cp
= cp
->cpu_next
) != cpu_list
);
1133 loadavg
.lg_loads
[loadavg
.lg_cur
] = loadavg
.lg_total
;
1134 loadavg
.lg_cur
= (loadavg
.lg_cur
+ 1) % S_LOADAVG_SZ
;
1135 loadavg
.lg_len
= (loadavg
.lg_len
+ 1) < S_LOADAVG_SZ
?
1136 loadavg
.lg_len
+ 1 : S_LOADAVG_SZ
;
1138 * Second pass updates counts
1140 cpupart
= cp_list_head
;
1143 struct loadavg_s
*lavg
;
1145 lavg
= &cpupart
->cp_loadavg
;
1146 lavg
->lg_loads
[lavg
->lg_cur
] = lavg
->lg_total
;
1148 lavg
->lg_cur
= (lavg
->lg_cur
+ 1) % S_LOADAVG_SZ
;
1149 lavg
->lg_len
= (lavg
->lg_len
+ 1) < S_LOADAVG_SZ
?
1150 lavg
->lg_len
+ 1 : S_LOADAVG_SZ
;
1152 } while ((cpupart
= cpupart
->cp_next
) != cp_list_head
);
1155 * Third pass totals up per-zone statistics.
1157 zone_loadavg_update();
1161 * clock_update() - local clock update
1163 * This routine is called by ntp_adjtime() to update the local clock
1164 * phase and frequency. The implementation is of an
1165 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
1166 * routine computes new time and frequency offset estimates for each
1167 * call. The PPS signal itself determines the new time offset,
1168 * instead of the calling argument. Presumably, calls to
1169 * ntp_adjtime() occur only when the caller believes the local clock
1170 * is valid within some bound (+-128 ms with NTP). If the caller's
1171 * time is far different than the PPS time, an argument will ensue,
1172 * and it's not clear who will lose.
1174 * For uncompensated quartz crystal oscillatores and nominal update
1175 * intervals less than 1024 s, operation should be in phase-lock mode
1176 * (STA_FLL = 0), where the loop is disciplined to phase. For update
1177 * intervals greater than this, operation should be in frequency-lock
1178 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1180 * Note: mutex(&tod_lock) is in effect.
1183 clock_update(int offset
)
1185 int ltemp
, mtemp
, s
;
1187 ASSERT(MUTEX_HELD(&tod_lock
));
1189 if (!(time_status
& STA_PLL
) && !(time_status
& STA_PPSTIME
))
1192 if ((time_status
& STA_PPSTIME
) && (time_status
& STA_PPSSIGNAL
))
1196 * Scale the phase adjustment and clamp to the operating range.
1198 if (ltemp
> MAXPHASE
)
1199 time_offset
= MAXPHASE
* SCALE_UPDATE
;
1200 else if (ltemp
< -MAXPHASE
)
1201 time_offset
= -(MAXPHASE
* SCALE_UPDATE
);
1203 time_offset
= ltemp
* SCALE_UPDATE
;
1206 * Select whether the frequency is to be controlled and in which
1207 * mode (PLL or FLL). Clamp to the operating range. Ugly
1208 * multiply/divide should be replaced someday.
1210 if (time_status
& STA_FREQHOLD
|| time_reftime
== 0)
1211 time_reftime
= hrestime
.tv_sec
;
1213 mtemp
= hrestime
.tv_sec
- time_reftime
;
1214 time_reftime
= hrestime
.tv_sec
;
1216 if (time_status
& STA_FLL
) {
1217 if (mtemp
>= MINSEC
) {
1218 ltemp
= ((time_offset
/ mtemp
) * (SCALE_USEC
/
1221 time_freq
+= ltemp
/ SCALE_KH
;
1224 if (mtemp
< MAXSEC
) {
1227 time_freq
+= (int)(((int64_t)ltemp
*
1228 SCALE_USEC
) / SCALE_KF
)
1229 / (1 << (time_constant
* 2));
1232 if (time_freq
> time_tolerance
)
1233 time_freq
= time_tolerance
;
1234 else if (time_freq
< -time_tolerance
)
1235 time_freq
= -time_tolerance
;
1237 s
= hr_clock_lock();
1243 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
1245 * This routine is called at each PPS interrupt in order to discipline
1246 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1247 * and leaves it in a handy spot for the clock() routine. It
1248 * integrates successive PPS phase differences and calculates the
1249 * frequency offset. This is used in clock() to discipline the CPU
1250 * clock oscillator so that intrinsic frequency error is cancelled out.
1251 * The code requires the caller to capture the time and hardware counter
1252 * value at the on-time PPS signal transition.
1254 * Note that, on some Unix systems, this routine runs at an interrupt
1255 * priority level higher than the timer interrupt routine clock().
1256 * Therefore, the variables used are distinct from the clock()
1257 * variables, except for certain exceptions: The PPS frequency pps_freq
1258 * and phase pps_offset variables are determined by this routine and
1259 * updated atomically. The time_tolerance variable can be considered a
1260 * constant, since it is infrequently changed, and then only when the
1261 * PPS signal is disabled. The watchdog counter pps_valid is updated
1262 * once per second by clock() and is atomically cleared in this
1265 * tvp is the time of the last tick; usec is a microsecond count since the
1268 * Note: In Solaris systems, the tick value is actually given by
1269 * usec_per_tick. This is called from the serial driver cdintr(),
1270 * or equivalent, at a high PIL. Because the kernel keeps a
1271 * highresolution time, the following code can accept either
1272 * the traditional argument pair, or the current highres timestamp
1273 * in tvp and zero in usec.
1276 ddi_hardpps(struct timeval
*tvp
, int usec
)
1278 int u_usec
, v_usec
, bigtick
;
1283 * An occasional glitch can be produced when the PPS interrupt
1284 * occurs in the clock() routine before the time variable is
1285 * updated. Here the offset is discarded when the difference
1286 * between it and the last one is greater than tick/2, but not
1287 * if the interval since the first discard exceeds 30 s.
1289 time_status
|= STA_PPSSIGNAL
;
1290 time_status
&= ~(STA_PPSJITTER
| STA_PPSWANDER
| STA_PPSERROR
);
1292 u_usec
= -tvp
->tv_usec
;
1293 if (u_usec
< -(MICROSEC
/2))
1295 v_usec
= pps_offset
- u_usec
;
1298 if (v_usec
> (usec_per_tick
>> 1)) {
1299 if (pps_glitch
> MAXGLITCH
) {
1305 u_usec
= pps_offset
;
1311 * A three-stage median filter is used to help deglitch the pps
1312 * time. The median sample becomes the time offset estimate; the
1313 * difference between the other two samples becomes the time
1314 * dispersion (jitter) estimate.
1316 pps_tf
[2] = pps_tf
[1];
1317 pps_tf
[1] = pps_tf
[0];
1319 if (pps_tf
[0] > pps_tf
[1]) {
1320 if (pps_tf
[1] > pps_tf
[2]) {
1321 pps_offset
= pps_tf
[1]; /* 0 1 2 */
1322 v_usec
= pps_tf
[0] - pps_tf
[2];
1323 } else if (pps_tf
[2] > pps_tf
[0]) {
1324 pps_offset
= pps_tf
[0]; /* 2 0 1 */
1325 v_usec
= pps_tf
[2] - pps_tf
[1];
1327 pps_offset
= pps_tf
[2]; /* 0 2 1 */
1328 v_usec
= pps_tf
[0] - pps_tf
[1];
1331 if (pps_tf
[1] < pps_tf
[2]) {
1332 pps_offset
= pps_tf
[1]; /* 2 1 0 */
1333 v_usec
= pps_tf
[2] - pps_tf
[0];
1334 } else if (pps_tf
[2] < pps_tf
[0]) {
1335 pps_offset
= pps_tf
[0]; /* 1 0 2 */
1336 v_usec
= pps_tf
[1] - pps_tf
[2];
1338 pps_offset
= pps_tf
[2]; /* 1 2 0 */
1339 v_usec
= pps_tf
[1] - pps_tf
[0];
1342 if (v_usec
> MAXTIME
)
1344 v_usec
= (v_usec
<< PPS_AVG
) - pps_jitter
;
1345 pps_jitter
+= v_usec
/ (1 << PPS_AVG
);
1346 if (pps_jitter
> (MAXTIME
>> 1))
1347 time_status
|= STA_PPSJITTER
;
1350 * During the calibration interval adjust the starting time when
1351 * the tick overflows. At the end of the interval compute the
1352 * duration of the interval and the difference of the hardware
1353 * counters at the beginning and end of the interval. This code
1354 * is deliciously complicated by the fact valid differences may
1355 * exceed the value of tick when using long calibration
1356 * intervals and small ticks. Note that the counter can be
1357 * greater than tick if caught at just the wrong instant, but
1358 * the values returned and used here are correct.
1360 bigtick
= (int)usec_per_tick
* SCALE_USEC
;
1361 pps_usec
-= pps_freq
;
1362 if (pps_usec
>= bigtick
)
1363 pps_usec
-= bigtick
;
1365 pps_usec
+= bigtick
;
1368 if (pps_count
< (1 << pps_shift
))
1372 u_usec
= usec
* SCALE_USEC
;
1373 v_usec
= pps_usec
- u_usec
;
1374 if (v_usec
>= bigtick
>> 1)
1376 if (v_usec
< -(bigtick
>> 1))
1379 v_usec
= -(-v_usec
>> pps_shift
);
1381 v_usec
= v_usec
>> pps_shift
;
1383 cal_sec
= tvp
->tv_sec
;
1384 cal_usec
= tvp
->tv_usec
;
1385 cal_sec
-= pps_time
.tv_sec
;
1386 cal_usec
-= pps_time
.tv_usec
;
1388 cal_usec
+= MICROSEC
;
1394 * Check for lost interrupts, noise, excessive jitter and
1395 * excessive frequency error. The number of timer ticks during
1396 * the interval may vary +-1 tick. Add to this a margin of one
1397 * tick for the PPS signal jitter and maximum frequency
1398 * deviation. If the limits are exceeded, the calibration
1399 * interval is reset to the minimum and we start over.
1401 u_usec
= (int)usec_per_tick
<< 1;
1402 if (!((cal_sec
== -1 && cal_usec
> (MICROSEC
- u_usec
)) ||
1403 (cal_sec
== 0 && cal_usec
< u_usec
)) ||
1404 v_usec
> time_tolerance
|| v_usec
< -time_tolerance
) {
1406 pps_shift
= PPS_SHIFT
;
1408 time_status
|= STA_PPSERROR
;
1413 * A three-stage median filter is used to help deglitch the pps
1414 * frequency. The median sample becomes the frequency offset
1415 * estimate; the difference between the other two samples
1416 * becomes the frequency dispersion (stability) estimate.
1418 pps_ff
[2] = pps_ff
[1];
1419 pps_ff
[1] = pps_ff
[0];
1421 if (pps_ff
[0] > pps_ff
[1]) {
1422 if (pps_ff
[1] > pps_ff
[2]) {
1423 u_usec
= pps_ff
[1]; /* 0 1 2 */
1424 v_usec
= pps_ff
[0] - pps_ff
[2];
1425 } else if (pps_ff
[2] > pps_ff
[0]) {
1426 u_usec
= pps_ff
[0]; /* 2 0 1 */
1427 v_usec
= pps_ff
[2] - pps_ff
[1];
1429 u_usec
= pps_ff
[2]; /* 0 2 1 */
1430 v_usec
= pps_ff
[0] - pps_ff
[1];
1433 if (pps_ff
[1] < pps_ff
[2]) {
1434 u_usec
= pps_ff
[1]; /* 2 1 0 */
1435 v_usec
= pps_ff
[2] - pps_ff
[0];
1436 } else if (pps_ff
[2] < pps_ff
[0]) {
1437 u_usec
= pps_ff
[0]; /* 1 0 2 */
1438 v_usec
= pps_ff
[1] - pps_ff
[2];
1440 u_usec
= pps_ff
[2]; /* 1 2 0 */
1441 v_usec
= pps_ff
[1] - pps_ff
[0];
1446 * Here the frequency dispersion (stability) is updated. If it
1447 * is less than one-fourth the maximum (MAXFREQ), the frequency
1448 * offset is updated as well, but clamped to the tolerance. It
1449 * will be processed later by the clock() routine.
1451 v_usec
= (v_usec
>> 1) - pps_stabil
;
1453 pps_stabil
-= -v_usec
>> PPS_AVG
;
1455 pps_stabil
+= v_usec
>> PPS_AVG
;
1456 if (pps_stabil
> MAXFREQ
>> 2) {
1458 time_status
|= STA_PPSWANDER
;
1461 if (time_status
& STA_PPSFREQ
) {
1463 pps_freq
-= -u_usec
>> PPS_AVG
;
1464 if (pps_freq
< -time_tolerance
)
1465 pps_freq
= -time_tolerance
;
1468 pps_freq
+= u_usec
>> PPS_AVG
;
1469 if (pps_freq
> time_tolerance
)
1470 pps_freq
= time_tolerance
;
1475 * Here the calibration interval is adjusted. If the maximum
1476 * time difference is greater than tick / 4, reduce the interval
1477 * by half. If this is not the case for four consecutive
1478 * intervals, double the interval.
1480 if (u_usec
<< pps_shift
> bigtick
>> 2) {
1482 if (pps_shift
> PPS_SHIFT
)
1484 } else if (pps_intcnt
>= 4) {
1486 if (pps_shift
< PPS_SHIFTMAX
)
1492 * If recovering from kmdb, then make sure the tod chip gets resynced.
1493 * If we took an early exit above, then we don't yet have a stable
1494 * calibration signal to lock onto, so don't mark the tod for sync
1495 * until we get all the way here.
1498 int s
= hr_clock_lock();
1506 * Handle clock tick processing for a thread.
1507 * Check for timer action, enforce CPU rlimit, do profiling etc.
1510 clock_tick(kthread_t
*t
, int pending
)
1516 int poke
= 0; /* notify another CPU */
1519 int i
, total_usec
, usec
;
1522 ASSERT(pending
> 0);
1524 /* Must be operating on a lwp/thread */
1525 if ((lwp
= ttolwp(t
)) == NULL
) {
1526 panic("clock_tick: no lwp");
1530 for (i
= 0; i
< pending
; i
++) {
1531 CL_TICK(t
); /* Class specific tick processing */
1532 DTRACE_SCHED1(tick
, kthread_t
*, t
);
1537 /* pp->p_lock makes sure that the thread does not exit */
1538 ASSERT(MUTEX_HELD(&pp
->p_lock
));
1540 user_mode
= (lwp
->lwp_state
== LWP_USER
);
1542 ticks
= (pp
->p_utime
+ pp
->p_stime
) % hz
;
1544 * Update process times. Should use high res clock and state
1545 * changes instead of statistical sampling method. XXX
1548 pp
->p_utime
+= pending
;
1550 pp
->p_stime
+= pending
;
1553 pp
->p_ttime
+= pending
;
1557 * Update user profiling statistics. Get the pc from the
1558 * lwp when the AST happens.
1560 if (pp
->p_prof
.pr_scale
) {
1561 atomic_add_32(&lwp
->lwp_oweupc
, (int32_t)pending
);
1569 * If CPU was in user state, process lwp-virtual time
1570 * interval timer. The value passed to itimerdecr() has to be
1571 * in microseconds and has to be less than one second. Hence
1574 total_usec
= usec_per_tick
* pending
;
1575 while (total_usec
> 0) {
1576 usec
= MIN(total_usec
, (MICROSEC
- 1));
1578 timerisset(&lwp
->lwp_timer
[ITIMER_VIRTUAL
].it_value
) &&
1579 itimerdecr(&lwp
->lwp_timer
[ITIMER_VIRTUAL
], usec
) == 0) {
1581 sigtoproc(pp
, t
, SIGVTALRM
);
1587 * If CPU was in user state, process lwp-profile
1590 total_usec
= usec_per_tick
* pending
;
1591 while (total_usec
> 0) {
1592 usec
= MIN(total_usec
, (MICROSEC
- 1));
1593 if (timerisset(&lwp
->lwp_timer
[ITIMER_PROF
].it_value
) &&
1594 itimerdecr(&lwp
->lwp_timer
[ITIMER_PROF
], usec
) == 0) {
1596 sigtoproc(pp
, t
, SIGPROF
);
1602 * Enforce CPU resource controls:
1603 * (a) process.max-cpu-time resource control
1605 * Perform the check only if we have accumulated more a second.
1607 if ((ticks
+ pending
) >= hz
) {
1608 (void) rctl_test(rctlproc_legacy
[RLIMIT_CPU
], pp
->p_rctls
, pp
,
1609 (pp
->p_utime
+ pp
->p_stime
)/hz
, RCA_UNSAFE_SIGINFO
);
1613 * (b) task.max-cpu-time resource control
1615 * If we have accumulated enough ticks, increment the task CPU
1616 * time usage and test for the resource limit. This minimizes the
1617 * number of calls to the rct_test(). The task CPU time mutex
1618 * is highly contentious as many processes can be sharing a task.
1620 if (pp
->p_ttime
>= clock_tick_proc_max
) {
1621 secs
= task_cpu_time_incr(pp
->p_task
, pp
->p_ttime
);
1624 (void) rctl_test(rc_task_cpu_time
, pp
->p_task
->tk_rctls
,
1625 pp
, secs
, RCA_UNSAFE_SIGINFO
);
1630 * Update memory usage for the currently running process.
1633 PTOU(pp
)->u_mem
+= rss
;
1634 if (rss
> PTOU(pp
)->u_mem_max
)
1635 PTOU(pp
)->u_mem_max
= rss
;
1638 * Notify the CPU the thread is running on.
1640 if (poke
&& t
->t_cpu
!= CPU
)
1641 poke_cpu(t
->t_cpu
->cpu_id
);
1645 profil_tick(uintptr_t upc
)
1648 proc_t
*p
= ttoproc(curthread
);
1649 klwp_t
*lwp
= ttolwp(curthread
);
1650 struct prof
*pr
= &p
->p_prof
;
1653 ticks
= lwp
->lwp_oweupc
;
1654 } while (atomic_cas_32(&lwp
->lwp_oweupc
, ticks
, 0) != ticks
);
1656 mutex_enter(&p
->p_pflock
);
1657 if (pr
->pr_scale
>= 2 && upc
>= pr
->pr_off
) {
1659 * Old-style profiling
1661 uint16_t *slot
= pr
->pr_base
;
1663 if (pr
->pr_scale
!= 2) {
1664 uintptr_t delta
= upc
- pr
->pr_off
;
1665 uintptr_t byteoff
= ((delta
>> 16) * pr
->pr_scale
) +
1666 (((delta
& 0xffff) * pr
->pr_scale
) >> 16);
1667 if (byteoff
>= (uintptr_t)pr
->pr_size
) {
1668 mutex_exit(&p
->p_pflock
);
1671 slot
+= byteoff
/ sizeof (uint16_t);
1673 if (fuword16(slot
, &old
) < 0 ||
1674 (new = old
+ ticks
) > SHRT_MAX
||
1675 suword16(slot
, new) < 0) {
1678 } else if (pr
->pr_scale
== 1) {
1682 model_t model
= lwp_getdatamodel(lwp
);
1684 while (ticks
-- > 0) {
1685 if (pr
->pr_samples
== pr
->pr_size
) {
1686 /* buffer full, turn off sampling */
1690 switch (SIZEOF_PTR(model
)) {
1691 case sizeof (uint32_t):
1692 result
= suword32(pr
->pr_base
, (uint32_t)upc
);
1695 case sizeof (uint64_t):
1696 result
= suword64(pr
->pr_base
, (uint64_t)upc
);
1700 cmn_err(CE_WARN
, "profil_tick: unexpected "
1709 pr
->pr_base
= (caddr_t
)pr
->pr_base
+ SIZEOF_PTR(model
);
1713 mutex_exit(&p
->p_pflock
);
1717 delay_wakeup(void *arg
)
1721 mutex_enter(&t
->t_delay_lock
);
1722 cv_signal(&t
->t_delay_cv
);
1723 mutex_exit(&t
->t_delay_lock
);
1727 * The delay(9F) man page indicates that it can only be called from user or
1728 * kernel context - detect and diagnose bad calls. The following macro will
1729 * produce a limited number of messages identifying bad callers. This is done
1730 * in a macro so that caller() is meaningful. When a bad caller is identified,
1731 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
1733 #define DELAY_CONTEXT_CHECK() { \
1738 m = delay_from_interrupt_msg; \
1739 if (delay_from_interrupt_diagnose && servicing_interrupt() && \
1740 !panicstr && !devinfo_freeze && \
1741 atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) { \
1742 f = modgetsymname((uintptr_t)caller(), &off); \
1743 cmn_err(CE_WARN, "delay(9F) called from " \
1744 "interrupt context: %s`%s", \
1745 mod_containing_pc(caller()), f ? f : "..."); \
1750 * delay_common: common delay code.
1753 delay_common(clock_t ticks
)
1755 kthread_t
*t
= curthread
;
1760 /* If timeouts aren't running all we can do is spin. */
1761 if (panicstr
|| devinfo_freeze
) {
1762 /* Convert delay(9F) call into drv_usecwait(9F) call. */
1764 drv_usecwait(TICK_TO_USEC(ticks
));
1768 deadline
= ddi_get_lbolt() + ticks
;
1769 while ((timeleft
= deadline
- ddi_get_lbolt()) > 0) {
1770 mutex_enter(&t
->t_delay_lock
);
1771 id
= timeout_default(delay_wakeup
, t
, timeleft
);
1772 cv_wait(&t
->t_delay_cv
, &t
->t_delay_lock
);
1773 mutex_exit(&t
->t_delay_lock
);
1774 (void) untimeout_default(id
, 0);
1779 * Delay specified number of clock ticks.
1782 delay(clock_t ticks
)
1784 DELAY_CONTEXT_CHECK();
1786 delay_common(ticks
);
1790 * Delay a random number of clock ticks between 1 and ticks.
1793 delay_random(clock_t ticks
)
1797 DELAY_CONTEXT_CHECK();
1799 (void) random_get_pseudo_bytes((void *)&r
, sizeof (r
));
1802 ticks
= (r
% ticks
) + 1;
1803 delay_common(ticks
);
1807 * Like delay, but interruptible by a signal.
1810 delay_sig(clock_t ticks
)
1812 kthread_t
*t
= curthread
;
1816 /* If timeouts aren't running all we can do is spin. */
1817 if (panicstr
|| devinfo_freeze
) {
1819 drv_usecwait(TICK_TO_USEC(ticks
));
1823 deadline
= ddi_get_lbolt() + ticks
;
1824 mutex_enter(&t
->t_delay_lock
);
1826 rc
= cv_timedwait_sig(&t
->t_delay_cv
,
1827 &t
->t_delay_lock
, deadline
);
1828 /* loop until past deadline or signaled */
1830 mutex_exit(&t
->t_delay_lock
);
1837 ddi_sleep_common(hrtime_t delay
, hrtime_t resolution
)
1839 kthread_t
*t
= curthread
;
1844 /* If timeouts aren't running all we can do is spin. */
1845 if (panicstr
|| devinfo_freeze
) {
1846 /* Convert ddi_*sleep(9F) call into drv_usecwait(9F) call. */
1847 if (NSEC2USEC(delay
) > 0)
1848 drv_usecwait(NSEC2USEC(delay
));
1853 * TODO: does this need to be in a loop checking that we didn't get
1854 * woken up too early?
1856 mutex_enter(&t
->t_delay_lock
);
1858 id
= timeout_generic(CALLOUT_NORMAL
, delay_wakeup
, t
, delay
,
1859 resolution
, CALLOUT_FLAG_ROUNDUP
);
1860 cv_wait(&t
->t_delay_cv
, &t
->t_delay_lock
);
1861 mutex_exit(&t
->t_delay_lock
);
1862 (void) untimeout_generic(id
, 0);
1863 if (gethrtime() - tmp
< delay
)
1864 cmn_err(CE_WARN
, "%s returned too soon (wanted %llu, got %llu)",
1865 __func__
, delay
, gethrtime() - tmp
);
1869 ddi_sleep(clock_t secs
)
1874 * We don't want to use 1 s resulution unconditionally because of
1875 * how it is used for rounding up the deadline. With 1 s
1876 * resolution, a sleep of 1 second can take anywhere from 1 to
1877 * 1.999999999 seconds on an idle system. This seems unacceptable,
1878 * and so we use either 100 ms or 10% of sleep interval as the
1879 * resolution - whichever is smaller.
1881 * (There is a similar issue with the milli- and micro- sleep
1882 * functions, but somehow an extra 1 ms or 1us doesn't seem as bad.)
1885 res
= MIN(100000000 /* 100 ms */, SEC2NSEC(secs
) / 10);
1887 res
= 100000000; /* 100 ms */
1889 ddi_sleep_common(SEC2NSEC(secs
), res
);
1893 ddi_msleep(clock_t msecs
)
1895 ddi_sleep_common(MSEC2NSEC(msecs
), 1000000 /* 1 ms */);
1899 ddi_usleep(clock_t usecs
)
1901 ddi_sleep_common(USEC2NSEC(usecs
), 1000 /* 1 us */);
1905 #define SECONDS_PER_DAY 86400
1908 * Initialize the system time based on the TOD chip. approx is used as
1909 * an approximation of time (e.g. from the filesystem) in the event that
1910 * the TOD chip has been cleared or is unresponsive. An approx of -1
1911 * means the filesystem doesn't keep time.
1914 clkset(time_t approx
)
1920 mutex_enter(&tod_lock
);
1923 if (ts
.tv_sec
> 365 * SECONDS_PER_DAY
) {
1925 * If the TOD chip is reporting some time after 1971,
1926 * then it probably didn't lose power or become otherwise
1927 * cleared in the recent past; check to assure that
1928 * the time coming from the filesystem isn't in the future
1929 * according to the TOD chip.
1931 if (approx
!= -1 && approx
> ts
.tv_sec
) {
1932 cmn_err(CE_WARN
, "Last shutdown is later "
1933 "than time on time-of-day chip; check date.");
1937 * If the TOD chip isn't giving correct time, set it to the
1938 * greater of i) approx and ii) 1987. That way if approx
1939 * is negative or is earlier than 1987, we set the clock
1940 * back to a time when Oliver North, ALF and Dire Straits
1941 * were all on the collective brain: 1987.
1944 time_t diagnose_date
= (1987 - 1970) * 365 * SECONDS_PER_DAY
;
1945 ts
.tv_sec
= (approx
> diagnose_date
? approx
: diagnose_date
);
1949 * Attempt to write the new time to the TOD chip. Set spl high
1950 * to avoid getting preempted between the tod_set and tod_get.
1957 if (tmp
.tv_sec
!= ts
.tv_sec
&& tmp
.tv_sec
!= ts
.tv_sec
+ 1) {
1960 cmn_err(CE_WARN
, "Time-of-day chip unresponsive.");
1962 cmn_err(CE_WARN
, "Time-of-day chip had "
1963 "incorrect date; check and reset.");
1969 boot_time
= ts
.tv_sec
;
1976 mutex_exit(&tod_lock
);
1979 int timechanged
; /* for testing if the system time has been reset */
1982 set_hrestime(timestruc_t
*ts
)
1984 int spl
= hr_clock_lock();
1986 membar_enter(); /* hrestime must be visible before timechanged++ */
1989 hr_clock_unlock(spl
);
1993 static uint_t deadman_seconds
;
1994 static uint32_t deadman_panics
;
1995 static int deadman_enabled
= 0;
1996 static int deadman_panic_timers
= 1;
2003 * During panic, other CPUs besides the panic
2004 * master continue to handle cyclics and some other
2005 * interrupts. The code below is intended to be
2006 * single threaded, so any CPU other than the master
2009 if (CPU
->cpu_id
!= panic_cpu
.cpu_id
)
2012 if (!deadman_panic_timers
)
2013 return; /* allow all timers to be manually disabled */
2016 * If we are generating a crash dump or syncing filesystems and
2017 * the corresponding timer is set, decrement it and re-enter
2018 * the panic code to abort it and advance to the next state.
2019 * The panic states and triggers are explained in panic.c.
2022 if (dump_timeleft
&& (--dump_timeleft
== 0)) {
2023 panic("panic dump timeout");
2030 if (deadman_counter
!= CPU
->cpu_deadman_counter
) {
2031 CPU
->cpu_deadman_counter
= deadman_counter
;
2032 CPU
->cpu_deadman_countdown
= deadman_seconds
;
2036 if (--CPU
->cpu_deadman_countdown
> 0)
2040 * Regardless of whether or not we actually bring the system down,
2041 * bump the deadman_panics variable.
2043 * N.B. deadman_panics is incremented once for each CPU that
2044 * passes through here. It's expected that all the CPUs will
2045 * detect this condition within one second of each other, so
2046 * when deadman_enabled is off, deadman_panics will
2047 * typically be a multiple of the total number of CPUs in
2050 atomic_inc_32(&deadman_panics
);
2052 if (!deadman_enabled
) {
2053 CPU
->cpu_deadman_countdown
= deadman_seconds
;
2058 * If we're here, we want to bring the system down.
2060 panic("deadman: timed out after %d seconds of clock "
2061 "inactivity", deadman_seconds
);
2067 deadman_online(void *arg
, cpu_t
*cpu
, cyc_handler_t
*hdlr
, cyc_time_t
*when
)
2069 cpu
->cpu_deadman_counter
= 0;
2070 cpu
->cpu_deadman_countdown
= deadman_seconds
;
2072 hdlr
->cyh_func
= (cyc_func_t
)deadman
;
2073 hdlr
->cyh_level
= CY_HIGH_LEVEL
;
2074 hdlr
->cyh_arg
= NULL
;
2077 * Stagger the CPUs so that they don't all run deadman() at
2078 * the same time. Simplest reason to do this is to make it
2079 * more likely that only one CPU will panic in case of a
2080 * timeout. This is (strictly speaking) an aesthetic, not a
2081 * technical consideration.
2083 when
->cyt_when
= cpu
->cpu_id
* (NANOSEC
/ NCPU
);
2084 when
->cyt_interval
= NANOSEC
;
2091 cyc_omni_handler_t hdlr
;
2093 if (deadman_seconds
== 0)
2094 deadman_seconds
= snoop_interval
/ MICROSEC
;
2097 deadman_enabled
= 1;
2099 hdlr
.cyo_online
= deadman_online
;
2100 hdlr
.cyo_offline
= NULL
;
2101 hdlr
.cyo_arg
= NULL
;
2103 mutex_enter(&cpu_lock
);
2104 deadman_cyclic
= cyclic_add_omni(&hdlr
);
2105 mutex_exit(&cpu_lock
);
/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
 *     a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));
	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}

	return (tod_faulted);
}
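/*
 * Illustrative sketch (not code from this file): a caller that detects a
 * backwards-running TOD would report it roughly as follows, holding
 * tod_lock as asserted above:
 *
 *	mutex_enter(&tod_lock);
 *	(void) tod_fault(TOD_REVERSED, (int)(prev - cur));
 *	mutex_exit(&tod_lock);
 *
 * A later call with TOD_NOFAULT resets the state machine and resumes
 * TOD tracking.
 */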
/*
 * Two functions that allow tod_status_flag to be manipulated by functions
 * external to this file.
 */
void
tod_status_set(int tod_flag)
{
	tod_status_flag |= tod_flag;
}

void
tod_status_clear(int tod_flag)
{
	tod_status_flag &= ~tod_flag;
}
/*
 * Record a timestamp and the value passed to tod_set(). The next call to
 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
 * when checking the timestruc_t returned by tod_get(). Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task but these
 * become obsolete, and will be re-assigned with the prev_set_* values,
 * in the case when the TOD is re-written.
 */
void
tod_set_prev(timestruc_t ts)
{
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return;
	}
	prev_set_tick = gethrtime();
	/*
	 * A negative value will be set to zero in utc_to_tod() so we fake
	 * a zero here in such a case. This would need to change if the
	 * behavior of utc_to_tod() changes.
	 */
	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
}
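/*
 * Illustrative call sequence (an assumption about the platform callers,
 * not code from this file): a tod_set() implementation is expected to
 * snapshot state just before rewriting the TOD hardware, e.g.:
 *
 *	tod_set_prev(ts);		(save prev_set_tod/prev_set_tick)
 *	(write ts to the TOD chip)
 *	tod_status_set(TOD_SET_DONE);	(tell tod_validate() about it)
 */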
/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;
	long dtick;
	int dtick_delta;
	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	int cpr_resume_done = 0;
	int dr_resume_done = 0;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));
	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * If this is the first time through, we just need to save the tod
	 * we were called with and hrtime so we can use them next time to
	 * validate tod_get().
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * Handle any flags that have been turned on by tod_status_set().
	 * In the case where a tod_set() is done and then a subsequent
	 * tod_get() fails (ie, both TOD_SET_DONE and TOD_GET_FAILED are
	 * true), we treat the TOD_GET_FAILED with precedence by switching
	 * off the flag, returning tod and leaving TOD_SET_DONE asserted
	 * until such time as tod_get() completes successfully.
	 */
	if (tod_status_flag & TOD_GET_FAILED) {
		/*
		 * tod_get() has encountered an issue, possibly transitory,
		 * when reading TOD. We'll just return the incoming tod
		 * value (which is actually hrestime.tv_sec in this case)
		 * and when we get a genuine tod, following a successful
		 * tod_get(), we can validate using prev_tod and prev_tick.
		 */
		tod_status_flag &= ~TOD_GET_FAILED;
		return (tod);
	} else if (tod_status_flag & TOD_SET_DONE) {
		/*
		 * TOD has been modified. Just before the TOD was written,
		 * tod_set_prev() saved tod and hrtime; we can now use
		 * those values, prev_set_tod and prev_set_tick, to validate
		 * the incoming tod that's just been read.
		 */
		prev_tod = prev_set_tod;
		prev_tick = prev_set_tick;
		dtick_avg = TOD_REF_FREQ;
		tod_status_flag &= ~TOD_SET_DONE;
		/*
		 * If a tod_set() preceded a cpr_suspend() without an
		 * intervening tod_validate(), we need to ensure that a
		 * TOD_JUMPED condition is ignored.
		 * Note this isn't a concern in the case of DR as we've
		 * just reassigned dtick_avg, above.
		 */
		if (tod_status_flag & TOD_CPR_RESUME_DONE) {
			cpr_resume_done = 1;
			tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
		/*
		 * The system's coming back from a checkpoint resume.
		 */
		cpr_resume_done = 1;
		tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		/*
		 * We need to handle the possibility of a CPR suspend
		 * operation having been initiated whilst a DR event was
		 * in-flight.
		 */
		if (tod_status_flag & TOD_DR_RESUME_DONE) {
			dr_resume_done = 1;
			tod_status_flag &= ~TOD_DR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_DR_RESUME_DONE) {
		/*
		 * A Dynamic Reconfiguration event has taken place.
		 */
		dr_resume_done = 1;
		tod_status_flag &= ~TOD_DR_RESUME_DONE;
	}
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	}
	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);
	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick
			 * so that diff_tick is calculated since
			 * the first diff_tod == 0
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;
		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as
		 * variation from reference freq in quartiles
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);
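		/*
		 * Worked example (illustrative): if TOD_REF_FREQ is the
		 * expected hrtime delta per TOD second and dtick_avg has
		 * drifted to 1.3 * TOD_REF_FREQ, then
		 *
		 *	dtick_delta = (0.3 * TOD_REF_FREQ) /
		 *	    (TOD_REF_FREQ / 4) == 1
		 *
		 * i.e. one quartile fast; any dtick_avg within +/- 25% of
		 * the reference yields dtick_delta == 0 and no fault.
		 */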
		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error.  The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/*
				 * If we've just done a CPR resume, we detect
				 * a jump in the TOD but, actually, what's
				 * happened is that the TOD has been increasing
				 * whilst the system was suspended and the tick
				 * count hasn't kept up.  We consider the first
				 * occurrence of this after a resume as normal
				 * and ignore it; otherwise, in a non-resume
				 * case, we regard it as a TOD problem.
				 */
				if (!cpr_resume_done) {
					/* ERROR - tod jumped */
					tod_bad = TOD_JUMPED;
					off = (int)diff_tod;
				}
			}

			if (dtick_delta) {
				/*
				 * If we've just done a DR resume, dtick_avg
				 * can go a bit askew so we reset it and carry
				 * on; otherwise, the TOD is in error.
				 */
				if (dr_resume_done) {
					dtick_avg = TOD_REF_FREQ;
				} else {
					/* ERROR - change in clock rate */
					tod_bad = TOD_RATECHANGED;
				}
			}
		}
	}
	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr since we are going to fault
		 * the TOD chip anyway here
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
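/*
 * Illustrative usage (an assumption about the callers, not code from this
 * file): platform tod_get() implementations typically run the value they
 * read through this filter while holding tod_lock:
 *
 *	mutex_enter(&tod_lock);
 *	ts.tv_sec = tod_validate(ts.tv_sec);
 *	mutex_exit(&tod_lock);
 *
 * so a faulted TOD quietly degrades to hrestime-based time instead of
 * propagating a bad value to the rest of the system.
 */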
void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
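/*
 * Worked example of the decay constants above (illustrative only): for
 * the 1-minute average, f[0]/8192 == 135/8192 ~= 0.0165 ~= 1 - e^(-1/60),
 * so each one-second update moves hp_ave[0] about 1.65% of the way toward
 * the instantaneous run-queue value; after 60 updates a step input decays
 * to e^-1 (about 37%) of its initial distance, which is exactly the
 * behavior expected of a 60-second exponential moving average.
 */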
/*
 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
 * calculate the value of lbolt according to the current mode. In the event
 * driven mode (the default), lbolt is calculated by dividing the current hires
 * time by the number of nanoseconds per clock tick. In the cyclic driven mode
 * an internal variable is incremented at each firing of the lbolt cyclic
 * and returned by lbolt_cyclic_driven().
 *
 * The system will transition from event to cyclic driven mode when the number
 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
 * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
 * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
 * causing enough activity to cross the thresholds.
 */
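/*
 * Illustrative arithmetic (assuming a 100Hz clock, i.e. nsec_per_tick ==
 * 10,000,000): in event driven mode a call made 5 seconds after boot
 * computes
 *
 *	lb = gethrtime() / nsec_per_tick == 5,000,000,000 / 10,000,000
 *	   == 500 ticks
 *
 * while in cyclic driven mode the same value comes from lbi_internal,
 * which the lbolt cyclic has incremented once per tick.
 */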
int64_t
lbolt_bootstrap(void)
{
	return (0);
}
/* ARGSUSED */
uint_t
lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
{
	hrtime_t ts, exp;
	int ret;

	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);

	kpreempt_disable();

	ts = gethrtime();
	lb_info->lbi_internal = (ts/nsec_per_tick);

	/*
	 * Align the next expiration to a clock tick boundary.
	 */
	exp = ts + nsec_per_tick - 1;
	exp = (exp/nsec_per_tick) * nsec_per_tick;

	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
	ASSERT(ret);

	lbolt_hybrid = lbolt_cyclic_driven;
	lb_info->lbi_cyc_deactivate = B_FALSE;
	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;

	kpreempt_enable();

	ret = atomic_dec_32_nv(&lb_info->lbi_token);
	ASSERT(ret == 0);

	return (1);
}
int64_t
lbolt_event_driven(void)
{
	hrtime_t ts;
	int64_t lb;
	int ret, cpu = CPU->cpu_seqid;

	ts = gethrtime();
	ASSERT(nsec_per_tick > 0);
	lb = (ts/nsec_per_tick);

	/*
	 * Switch to cyclic mode if the number of calls to this routine
	 * has reached the threshold within the interval.
	 */
	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {

		if (--lb_cpu[cpu].lbc_counter == 0) {
			/*
			 * Reached the threshold within the interval, reset
			 * the usage statistics.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;

			/*
			 * Make sure only one thread reprograms the
			 * lbolt cyclic and changes the mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_cyclic_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
				} else {
					lbolt_softint_post();
				}
			}
		}
	} else {
		/*
		 * Exceeded the interval, reset the usage statistics.
		 */
		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
		lb_cpu[cpu].lbc_cnt_start = lb;
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
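/*
 * Illustrative threshold walk-through (the numbers are assumptions, not
 * the tunables' actual defaults): with lbi_thresh_calls == 100 and
 * lbi_thresh_interval == 100 ticks, a CPU calling ddi_get_lbolt() 100
 * times within 100 ticks drives its lbc_counter to zero inside the
 * window, and the winner of the lbi_token CAS posts the softint that
 * runs lbolt_ev_to_cyclic(); sparser callers keep resetting
 * lbc_cnt_start and the system stays event driven.
 */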
int64_t
lbolt_cyclic_driven(void)
{
	int64_t lb = lb_info->lbi_internal;
	int cpu;

	/*
	 * If a CPU has already prevented the lbolt cyclic from deactivating
	 * itself, don't bother tracking the usage. Otherwise check if we're
	 * within the interval and how the per CPU counter is doing.
	 */
	if (lb_info->lbi_cyc_deactivate) {
		cpu = CPU->cpu_seqid;
		if ((lb - lb_cpu[cpu].lbc_cnt_start) <
		    lb_info->lbi_thresh_interval) {

			if (lb_cpu[cpu].lbc_counter == 0)
				/*
				 * Reached the threshold within the interval,
				 * prevent the lbolt cyclic from turning itself
				 * off.
				 */
				lb_info->lbi_cyc_deactivate = B_FALSE;
			else
				lb_cpu[cpu].lbc_counter--;
		} else {
			/*
			 * Only reset the usage statistics when we have
			 * exceeded the interval.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;
		}
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
/*
 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
 * It is inactive by default, and will be activated when switching from event
 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
 * by lbolt_cyclic_driven().
 */
static void
lbolt_cyclic(void)
{
	int ret;

	lb_info->lbi_internal++;

	if (!lbolt_cyc_only) {

		if (lb_info->lbi_cyc_deactivate) {
			/*
			 * Switching from cyclic to event driven mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_event_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
					return;
				}

				kpreempt_disable();

				lbolt_hybrid = lbolt_event_driven;
				ret = cyclic_reprogram(
				    lb_info->id.lbi_cyclic_id,
				    CY_INFINITY);
				ASSERT(ret);

				kpreempt_enable();

				ret = atomic_dec_32_nv(&lb_info->lbi_token);
				ASSERT(ret == 0);
			}
		}

		/*
		 * The lbolt cyclic should not try to deactivate itself before
		 * the sampling period has elapsed.
		 */
		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
		    lb_info->lbi_thresh_interval) {
			lb_info->lbi_cyc_deactivate = B_TRUE;
			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
		}
	}
}
/*
 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
 * when the system drops into the kernel debugger. lbolt_debug_entry() is
 * called by the KDI system claim callbacks to record a hires timestamp at
 * debug enter time. lbolt_debug_return() is called by the system release
 * callbacks to account for the time spent in the debugger. The value is then
 * accumulated in the lb_info structure and used by lbolt_event_driven() and
 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
 */
void
lbolt_debug_entry(void)
{
	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		lb_info->lbi_debug_ts = gethrtime();
	}
}
/*
 * Calculate the time spent in the debugger and add it to the lbolt info
 * structure. We also update the internal lbolt value in case we were in
 * cyclic driven mode going in.
 */
void
lbolt_debug_return(void)
{
	hrtime_t ts;

	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		ASSERT(nsec_per_tick > 0);

		ts = gethrtime();
		lb_info->lbi_internal = (ts/nsec_per_tick);
		lb_info->lbi_debug_time +=
		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);

		lb_info->lbi_debug_ts = 0;
	}
}
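/*
 * Illustrative effect (assuming a 100Hz clock): if the system sits in the
 * debugger for 3 seconds, lbi_debug_time grows by roughly 300 ticks, so
 * the values returned by lbolt_event_driven() and lbolt_cyclic_driven()
 * (lb - lbi_debug_time) resume where they left off at debug entry rather
 * than jumping forward across the debugger session.
 */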