/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2023 Oxide Computer Company
 */
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/callo.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/ddi_periodic.h>
#include <sys/random.h>
#include <sys/modctl.h>
#include <sys/timex.h>
#include <sys/inttypes.h>
#include <sys/sunddi.h>
#include <sys/clock_impl.h>
/*
 * clock() is called straight from the clock cyclic; see clock_init().
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */
extern hrtime_t volatile devinfo_freeze;

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
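/*
 * For illustration, a minimal sketch of a user-level reader of the PLL/FLL
 * state described above via ntp_adjtime(2).  With tx.modes left at zero the
 * call only reads; the return value is the clock state (TIME_OK, TIME_ERROR,
 * ...) and the struct reports the offset, frequency and error bounds kept in
 * the variables declared below.
 *
 *	#include <sys/timex.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timex tx = { 0 };	// modes == 0: read-only query
 *		int state = ntp_adjtime(&tx);
 *
 *		(void) printf("state %d offset %ld freq %ld maxerror %ld\n",
 *		    state, (long)tx.offset, (long)tx.freq, (long)tx.maxerror);
 *		return (0);
 *	}
 */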
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */
/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */
/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME.
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */
/*
 * Hybrid lbolt implementation:
 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
 * original symbols removed from the system. The once clock driven variables
 * are now implemented in an event driven fashion, backed by gethrtime()
 * coarsened to the appropriate clock resolution. The default event driven
 * implementation is complemented by a cyclic driven one, active only during
 * periods of intense activity around the DDI lbolt routines, when an
 * lbolt-specific cyclic is reprogrammed to fire at a clock tick interval to
 * serve consumers of lbolt who rely on the original low cost of consulting a
 * memory position.
 *
 * The implementation uses the number of calls to these routines and the
 * frequency of these to determine when to transition from event to cyclic
 * driven and vice-versa. These values are kept on a per CPU basis for
 * scalability reasons and to prevent CPUs from constantly invalidating a
 * single cache line when modifying a global variable. The transition from
 * event to cyclic mode happens once the thresholds are crossed, and activity
 * on any CPU can cause such a transition.
 *
 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 * lbolt_cyclic_driven() according to the current mode. When the thresholds
 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 * fire at a nsec_per_tick interval and increment an internal variable at
 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of such variable. lbolt_cyclic() will attempt
 * to shut itself off at each threshold interval (sampling period for calls
 * to the DDI lbolt routines), and return to the event driven mode, but will
 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
 * for the cyclic subsystem to be initialized.
 */
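/*
 * For illustration, a minimal sketch of how a DDI consumer typically exercises
 * the interfaces described above, using ddi_get_lbolt(9F) and drv_usectohz(9F);
 * the driver routine and its 50ms poll interval below are hypothetical:
 *
 *	static void
 *	mydrv_poll_wait(kmutex_t *mp, kcondvar_t *cvp)
 *	{
 *		clock_t deadline = ddi_get_lbolt() + drv_usectohz(50000);
 *
 *		mutex_enter(mp);
 *		while (cv_timedwait(cvp, mp, deadline) > 0)
 *			;	// woken before the deadline; wait again
 *		mutex_exit(mp);
 *	}
 *
 * Each ddi_get_lbolt() call goes through the lbolt_hybrid pointer declared
 * below, so heavy polling of this sort is what drives the switch to cyclic
 * driven mode.
 */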
int64_t lbolt_bootstrap(void);
int64_t lbolt_event_driven(void);
int64_t lbolt_cyclic_driven(void);
int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);

/*
 * lbolt's cyclic, installed by clock_init().
 */
static void lbolt_cyclic(void);

/*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven, once it reaches cyclic mode.
 */
static boolean_t lbolt_cyc_only = B_FALSE;

/*
 * Cache aligned, per CPU structure with lbolt usage statistics.
 */
static lbolt_cpu_t *lb_cpu;

/*
 * Single, cache aligned, structure with all the information required by
 * the lbolt implementation.
 */
lbolt_info_t *lb_info;
int	one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

extern void clock_tick_schedule(int);
extern void set_freemem(void);
extern void pageout_deadman(void);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */
/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static enum tod_fault_type tod_faulted = TOD_NOFAULT;

static int tod_status_flag = 0;		/* used by tod_validate() */

static hrtime_t prev_set_tick = 0;	/* gethrtime() prior to tod_set() */
static time_t prev_set_tod = 0;		/* tv_sec value passed to tod_set() */

/* patchable via /etc/system */
int tod_validate_enable = 1;

/* Diagnose/Limit messages about delay(9F) called from interrupt context */
int delay_from_interrupt_diagnose = 0;
volatile uint32_t delay_from_interrupt_msg = 20;
/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;
/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

static int64_t deadman_counter = 0;
static void
clock(void)
{
	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */

	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */

	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment executed every timer interrupt.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;

		timedelta -= ltemp * (NANOSEC/MICROSEC);

	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;

		timedelta += ltemp * (NANOSEC/MICROSEC);
	}
	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 */

	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */

	/*
	 * keep track of when to update lgrp/part loads
	 */
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}
	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */
	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable =
		    cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);
	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Update user, system, and idle cpu times.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}
		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average.
			 */

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt.
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);
	clock_tick_schedule(one_sec);

	/*
	 * Check for a callout that needs be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wakeup the cageout thread waiters once per second.
	 */
		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;
		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will insure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}
		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}
		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;
		time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
		/*
		 * End of precision kernel-code fragment executed
		 * every second.
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 * - if dosynctodr is 1, then compute the drift between
		 *   the tod chip and software time and adjust one or
		 *   the other depending on the circumstances
		 *
		 * - if dosynctodr is 0, then the tod chip is independent
		 *   of the software clock and should not be adjusted,
		 *   but allowed to free run.  this allows NTP to sync.
		 *   hrestime without any interference from the tod chip.
		 */
		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			if (!tod_broken && tod_faulted == TOD_NOFAULT) {
				membar_enter();	/* hrestime visible */
				if (tod_needsync || !dosynctodr) {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = now;
					timedelta = (int64_t)drift*NANOSEC;
				}
			}
		}
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);
	/*
	 * Some drivers still depend on this... XXX
	 */
	cv_broadcast(&lbolt_cv);

	vminfo.freemem += freemem;

	pgcnt_t maxswap, resv, free;
	pgcnt_t avail =
	    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

	maxswap = k_anoninfo.ani_mem_resv +
	    k_anoninfo.ani_max + avail;
	/* Update ani_free */
	free = k_anoninfo.ani_free + avail;
	resv = k_anoninfo.ani_phys_resv +
	    k_anoninfo.ani_mem_resv;

	vminfo.swap_resv += resv;
	/* number of reserved and allocated pages */
	if (maxswap < free)
		cmn_err(CE_WARN, "clock: maxswap < free");
	if (maxswap < resv)
		cmn_err(CE_WARN, "clock: maxswap < resv");
	vminfo.swap_alloc += maxswap - free;
	vminfo.swap_avail += maxswap - resv;
	vminfo.swap_free += free;
		sysinfo.runque += nrunnable;
		sysinfo.swpque += nswapped;
		sysinfo.waiting += w_io;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API. We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large. If it is, we put the largest value
			 * that we can use into avenrun[i]. This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[]
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;
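		/*
		 * Worked example of the scaling above (assuming FSHIFT is 8,
		 * its usual <sys/param.h> value, so FSCALE == 256):
		 * hp_avenrun[] keeps 16 fractional bits, so a load of 2.0 is
		 * stored there as 2 * 65536 == 131072.  Shifting right by
		 * (16 - FSHIFT) == 8 yields 512 == 2 * FSCALE, which is what
		 * lands in avenrun[]; a consumer recovers the familiar 2.0
		 * by dividing by FSCALE.
		 */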
		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			if (t->t_state == TS_STOPPED) {
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
			}
		}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (t->t_state == TS_STOPPED) {
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);
	}
void
clock_init(void)
{
	cyc_handler_t clk_hdlr, lbolt_hdlr;
	cyc_time_t clk_when, lbolt_when;
	int i, sz;
	intptr_t buf;

	/*
	 * Setup handler and timer for the clock cyclic.
	 */
	clk_hdlr.cyh_func = (cyc_func_t)clock;
	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
	clk_hdlr.cyh_arg = NULL;

	clk_when.cyt_when = 0;
	clk_when.cyt_interval = nsec_per_tick;
	/*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
	 * interval to satisfy performance needs of the DDI lbolt consumers.
	 * It is off by default.
	 */
	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
	lbolt_hdlr.cyh_arg = NULL;

	lbolt_when.cyt_interval = nsec_per_tick;
	/*
	 * Allocate cache line aligned space for the per CPU lbolt data and
	 * lbolt info structures, and initialize them with their default
	 * values.  Note that these structures are also cache line sized.
	 */
	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    hz/HZ_DEFAULT;
	else
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;

	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;

	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	for (i = 0; i < max_ncpus; i++)
		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
	/*
	 * Install the softint used to switch between event and cyclic driven
	 * lbolt.  We use a soft interrupt to make sure the context of the
	 * cyclic reprogram call is safe.
	 */
	lbolt_softint_add();

	/*
	 * Since the hybrid lbolt implementation is based on a hardware counter
	 * that is reset at every hardware reboot and that we'd like to have
	 * the lbolt value starting at zero after both a hardware and a fast
	 * reboot, we calculate the number of clock ticks the system's been up
	 * and store it in the lbi_debug_time field of the lbolt info structure.
	 * The value of this field will be subtracted from lbolt before
	 * returning it.
	 */
	lb_info->lbi_internal = lb_info->lbi_debug_time =
	    (gethrtime()/nsec_per_tick);
	/*
	 * lbolt_hybrid points at lbolt_bootstrap until now.  The LBOLT_* macros
	 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed.  Setting lbolt_hybrid
	 * to either lbolt_{cyclic,event}_driven here signals those code paths
	 * that the lbolt related structures can be used.
	 */
	if (lbolt_cyc_only) {
		lbolt_when.cyt_when = 0;
		lbolt_hybrid = lbolt_cyclic_driven;
	} else {
		lbolt_when.cyt_when = CY_INFINITY;
		lbolt_hybrid = lbolt_event_driven;
	}
	/*
	 * Grab cpu_lock and install all three cyclics.
	 */
	mutex_enter(&cpu_lock);

	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);

	mutex_exit(&cpu_lock);
}
/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */
static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}
/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */
static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */
	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;

	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;
	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

	/*
	 * Third pass totals up per-zone statistics.
	 */
	zone_loadavg_update();
}
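/*
 * For illustration, a minimal user-level model of the circular-buffer scheme
 * used by loadavg_update()/genloadavg() above; the sizes and the sample type
 * below are stand-ins for loadavg_s, S_LOADAVG_SZ and S_MOVAVG_SZ:
 *
 *	#define	NSLOTS	11	// ring of once-per-second samples
 *	#define	NAVG	10	// window used for the moving average
 *
 *	struct ring {
 *		long long loads[NSLOTS];
 *		int cur;	// next slot to fill
 *		int len;	// slots filled so far, capped at NSLOTS
 *	};
 *
 *	static void
 *	ring_put(struct ring *r, long long sample)
 *	{
 *		r->loads[r->cur] = sample;
 *		r->cur = (r->cur + 1) % NSLOTS;
 *		r->len = (r->len + 1) < NSLOTS ? r->len + 1 : NSLOTS;
 *	}
 *
 *	static long long
 *	ring_avg(const struct ring *r)
 *	{
 *		int slen = r->len < NAVG ? r->len : NAVG;
 *		int spos = (r->cur - 1 + NSLOTS) % NSLOTS;
 *		long long sum = 0;
 *
 *		if (slen == 0)
 *			return (0);
 *		for (int i = 0; i < slen; i++)
 *			sum += r->loads[(spos - i + NSLOTS) % NSLOTS];
 *		return (sum / slen);
 *	}
 */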
/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL).  The
 * routine computes new time and frequency offset estimates for each
 * call.  The PPS signal itself determines the new time offset,
 * instead of the calling argument.  Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP).  If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;

	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;
	else
		ltemp = offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			time_freq += (int)(((int64_t)ltemp *
			    SCALE_USEC) / SCALE_KF)
			    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
1248 * This routine is called at each PPS interrupt in order to discipline
1249 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1250 * and leaves it in a handy spot for the clock() routine. It
1251 * integrates successive PPS phase differences and calculates the
1252 * frequency offset. This is used in clock() to discipline the CPU
1253 * clock oscillator so that intrinsic frequency error is cancelled out.
1254 * The code requires the caller to capture the time and hardware counter
1255 * value at the on-time PPS signal transition.
1257 * Note that, on some Unix systems, this routine runs at an interrupt
1258 * priority level higher than the timer interrupt routine clock().
1259 * Therefore, the variables used are distinct from the clock()
1260 * variables, except for certain exceptions: The PPS frequency pps_freq
1261 * and phase pps_offset variables are determined by this routine and
1262 * updated atomically. The time_tolerance variable can be considered a
1263 * constant, since it is infrequently changed, and then only when the
1264 * PPS signal is disabled. The watchdog counter pps_valid is updated
1265 * once per second by clock() and is atomically cleared in this
1268 * tvp is the time of the last tick; usec is a microsecond count since the
1271 * Note: In Solaris systems, the tick value is actually given by
1272 * usec_per_tick. This is called from the serial driver cdintr(),
1273 * or equivalent, at a high PIL. Because the kernel keeps a
1274 * highresolution time, the following code can accept either
1275 * the traditional argument pair, or the current highres timestamp
1276 * in tvp and zero in usec.
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[0] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;
	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;
	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;

	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}
	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}
	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}
	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}
/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t, int pending)
{
	int poke = 0;		/* notify another CPU */
	int i, total_usec, usec;

	ASSERT(pending > 0);

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
	}

	for (i = 0; i < pending; i++) {
		CL_TICK(t);	/* Class specific tick processing */
		DTRACE_SCHED1(tick, kthread_t *, t);
	}
1541 ASSERT(MUTEX_HELD(&pp
->p_lock
));
1543 user_mode
= (lwp
->lwp_state
== LWP_USER
);
1545 ticks
= (pp
->p_utime
+ pp
->p_stime
) % hz
;
1547 * Update process times. Should use high res clock and state
1548 * changes instead of statistical sampling method. XXX
1551 pp
->p_utime
+= pending
;
1553 pp
->p_stime
+= pending
;
1556 pp
->p_ttime
+= pending
;
1560 * Update user profiling statistics. Get the pc from the
1561 * lwp when the AST happens.
1563 if (pp
->p_prof
.pr_scale
) {
1564 atomic_add_32(&lwp
->lwp_oweupc
, (int32_t)pending
);
1572 * If CPU was in user state, process lwp-virtual time
1573 * interval timer. The value passed to itimerdecr() has to be
1574 * in microseconds and has to be less than one second. Hence
1577 total_usec
= usec_per_tick
* pending
;
1578 while (total_usec
> 0) {
1579 usec
= MIN(total_usec
, (MICROSEC
- 1));
1581 timerisset(&lwp
->lwp_timer
[ITIMER_VIRTUAL
].it_value
) &&
1582 itimerdecr(&lwp
->lwp_timer
[ITIMER_VIRTUAL
], usec
) == 0) {
1584 sigtoproc(pp
, t
, SIGVTALRM
);
1590 * If CPU was in user state, process lwp-profile
1593 total_usec
= usec_per_tick
* pending
;
1594 while (total_usec
> 0) {
1595 usec
= MIN(total_usec
, (MICROSEC
- 1));
1596 if (timerisset(&lwp
->lwp_timer
[ITIMER_PROF
].it_value
) &&
1597 itimerdecr(&lwp
->lwp_timer
[ITIMER_PROF
], usec
) == 0) {
1599 sigtoproc(pp
, t
, SIGPROF
);
	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 *
	 * Perform the check only if we have accumulated more than a second.
	 */
	if ((ticks + pending) >= hz) {
		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
	}

	/*
	 *   (b) task.max-cpu-time resource control
	 *
	 * If we have accumulated enough ticks, increment the task CPU
	 * time usage and test for the resource limit.  This minimizes the
	 * number of calls to the rctl_test().  The task CPU time mutex
	 * is highly contentious as many processes can be sharing a task.
	 */
	if (pp->p_ttime >= clock_tick_proc_max) {
		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
		pp->p_ttime = 0;
		if (secs) {
			(void) rctl_test(rc_task_cpu_time,
			    pp->p_task->tk_rctls,
			    pp, secs, RCA_UNSAFE_SIGINFO);
		}
	}
	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(pp->p_as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}
void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (atomic_cas_32(&lwp->lwp_oweupc, ticks, 0) != ticks);
	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);

		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
			}
			pr->pr_base = (caddr_t)pr->pr_base +
			    SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}
static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}
/*
 * The delay(9F) man page indicates that it can only be called from user or
 * kernel context - detect and diagnose bad calls.  The following macro will
 * produce a limited number of messages identifying bad callers.  This is done
 * in a macro so that caller() is meaningful.  When a bad caller is identified,
 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
 */
#define	DELAY_CONTEXT_CHECK()	{					\
	uint32_t	m;						\
	char		*f;						\
	ulong_t		off;						\
									\
	m = delay_from_interrupt_msg;					\
	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
	    !panicstr && !devinfo_freeze &&				\
	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
		f = modgetsymname((uintptr_t)caller(), &off);		\
		cmn_err(CE_WARN, "delay(9F) called from "		\
		    "interrupt context: %s`%s",				\
		    mod_containing_pc(caller()), f ? f : "...");	\
	}								\
}
/*
 * delay_common: common delay code.
 */
static void
delay_common(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline;
	clock_t timeleft;
	callout_id_t id;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert delay(9F) call into drv_usecwait(9F) call. */
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	deadline = ddi_get_lbolt() + ticks;
	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout_default(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout_default(id, 0);
	}
}
/*
 * Delay specified number of clock ticks.
 */
void
delay(clock_t ticks)
{
	DELAY_CONTEXT_CHECK();

	delay_common(ticks);
}

/*
 * Delay a random number of clock ticks between 1 and ticks.
 */
void
delay_random(clock_t ticks)
{
	clock_t r;

	DELAY_CONTEXT_CHECK();

	(void) random_get_pseudo_bytes((void *)&r, sizeof (r));
	ticks = (r % ticks) + 1;
	delay_common(ticks);
}
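/*
 * For illustration, the drv_usecwait(9F) alternative that the
 * DELAY_CONTEXT_CHECK() comment above suggests for code that may run in
 * interrupt context; the driver routine and its 10-tick pause below are
 * hypothetical:
 *
 *	static void
 *	mydrv_short_pause(void)
 *	{
 *		if (servicing_interrupt())
 *			drv_usecwait(TICK_TO_USEC(10));	// busy-wait, no sleep
 *		else
 *			delay(10);			// may block
 *	}
 */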
/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline;
	clock_t rc;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		drv_usecwait(TICK_TO_USEC(ticks));
	}

	deadline = ddi_get_lbolt() + ticks;
	mutex_enter(&t->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&t->t_delay_cv,
		    &t->t_delay_lock, deadline);
		/* loop until past deadline or signaled */
	} while (rc > 0);
	mutex_exit(&t->t_delay_lock);
#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts, tmp;
	int spl;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, set it to the
		 * greater of i) approx and ii) 1987. That way if approx
		 * is negative or is earlier than 1987, we set the clock
		 * back to a time when Oliver North, ALF and Dire Straits
		 * were all on the collective brain: 1987.
		 */
		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl high
		 * to avoid getting preempted between the tod_set and tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;	/* the TOD won't work */
			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
	}

	boot_time = ts.tv_sec;
	global_zone->zone_boot_time = ts.tv_sec;

	mutex_exit(&tod_lock);
}
int	timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();

	hrestime = *ts;
	membar_enter();	/* hrestime must be visible before timechanged++ */
	timechanged++;
	hr_clock_unlock(spl);
}
static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;
static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must skip it.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (dump_timeleft && (--dump_timeleft == 0)) {
			panic("panic dump timeout");
		}
		return;
	}

	if (deadman_counter != CPU->cpu_deadman_counter) {
		CPU->cpu_deadman_counter = deadman_counter;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (--CPU->cpu_deadman_countdown > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_inc_32(&deadman_panics);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
}
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_counter = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}
void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
 *     a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}
/*
 * Two functions that allow tod_status_flag to be manipulated by functions
 * external to this file.
 */

void
tod_status_set(int tod_flag)
{
	tod_status_flag |= tod_flag;
}

void
tod_status_clear(int tod_flag)
{
	tod_status_flag &= ~tod_flag;
}
/*
 * Record a timestamp and the value passed to tod_set().  The next call to
 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
 * when checking the timestruc_t returned by tod_get().  Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task but these
 * become obsolete, and will be re-assigned with the prev_set_* values,
 * in the case when the TOD is re-written.
 */
void
tod_set_prev(timestruc_t ts)
{
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return;
	}
	prev_set_tick = gethrtime();
	/*
	 * A negative value will be set to zero in utc_to_tod() so we fake
	 * a zero here in such a case.  This would need to change if the
	 * behavior of utc_to_tod() changes.
	 */
	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
}
/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *	TOD_REVERSED: current tod value is less than previous.
 *	TOD_STALLED: current tod value hasn't advanced.
 *	TOD_JUMPED: current tod value advanced too far from previous value.
 *	TOD_RATECHANGED: the ratio between average tod delta and
 *	average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	int cpr_resume_done = 0;
	int dr_resume_done = 0;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));
	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * If this is the first time through, we just need to save the tod
	 * we were called with and hrtime so we can use them next time to
	 * validate tod_get().
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}
	/*
	 * Handle any flags that have been turned on by tod_status_set().
	 * In the case where a tod_set() is done and then a subsequent
	 * tod_get() fails (ie, both TOD_SET_DONE and TOD_GET_FAILED are
	 * true), we treat the TOD_GET_FAILED with precedence by switching
	 * off the flag, returning tod and leaving TOD_SET_DONE asserted
	 * until such time as tod_get() completes successfully.
	 */
	if (tod_status_flag & TOD_GET_FAILED) {
		/*
		 * tod_get() has encountered an issue, possibly transitory,
		 * when reading TOD. We'll just return the incoming tod
		 * value (which is actually hrestime.tv_sec in this case)
		 * and when we get a genuine tod, following a successful
		 * tod_get(), we can validate using prev_tod and prev_tick.
		 */
		tod_status_flag &= ~TOD_GET_FAILED;
		return (tod);
	} else if (tod_status_flag & TOD_SET_DONE) {
		/*
		 * TOD has been modified. Just before the TOD was written,
		 * tod_set_prev() saved tod and hrtime; we can now use
		 * those values, prev_set_tod and prev_set_tick, to validate
		 * the incoming tod that's just been read.
		 */
		prev_tod = prev_set_tod;
		prev_tick = prev_set_tick;
		dtick_avg = TOD_REF_FREQ;
		tod_status_flag &= ~TOD_SET_DONE;
		/*
		 * If a tod_set() preceded a cpr_suspend() without an
		 * intervening tod_validate(), we need to ensure that a
		 * TOD_JUMPED condition is ignored.
		 * Note this isn't a concern in the case of DR as we've
		 * just reassigned dtick_avg, above.
		 */
		if (tod_status_flag & TOD_CPR_RESUME_DONE) {
			cpr_resume_done = 1;
			tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
		/*
		 * The system's coming back from a checkpoint resume.
		 */
		cpr_resume_done = 1;
		tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		/*
		 * We need to handle the possibility of a CPR suspend
		 * operation having been initiated whilst a DR event was
		 * in-flight.
		 */
		if (tod_status_flag & TOD_DR_RESUME_DONE) {
			dr_resume_done = 1;
			tod_status_flag &= ~TOD_DR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_DR_RESUME_DONE) {
		/*
		 * A Dynamic Reconfiguration event has taken place.
		 */
		dr_resume_done = 1;
		tod_status_flag &= ~TOD_DR_RESUME_DONE;
	}
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}
	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick
			 * so that diff_tick is calculated since
			 * the first diff_tod == 0
			 */
			tick = prev_tick;
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as
		 * variation from reference freq in quartiles
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error. The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/*
				 * If we've just done a CPR resume, we detect
				 * a jump in the TOD but, actually, what's
				 * happened is that the TOD has been increasing
				 * whilst the system was suspended and the tick
				 * count hasn't kept up. We consider the first
				 * occurrence of this after a resume as normal
				 * and ignore it; otherwise, in a non-resume
				 * case, we regard it as a TOD problem.
				 */
				if (!cpr_resume_done) {
					/* ERROR - tod jumped */
					tod_bad = TOD_JUMPED;
					off = (int)diff_tod;
				}
			}
			if (dtick_delta) {
				/*
				 * If we've just done a DR resume, dtick_avg
				 * can go a bit askew so we reset it and carry
				 * on; otherwise, the TOD is in error.
				 */
				if (dr_resume_done) {
					dtick_avg = TOD_REF_FREQ;
				} else {
					/* ERROR - change in clock rate */
					tod_bad = TOD_RATECHANGED;
				}
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr since we are going to fault
		 * the TOD chip anyway here
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;

	return (tod);
}
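
/*
 * Worked example for the rate check above (illustrative numbers only,
 * assuming TOD_REF_FREQ is the nominal NANOSEC of hrtime per TOD second):
 * if the running average dtick_avg settles at 1.3 * NANOSEC, i.e. the TOD
 * is ticking about 30% slow relative to hrtime, then
 *
 *	dtick_delta = (1.3 * NANOSEC - NANOSEC) / (NANOSEC / 4) = 1
 *
 * which is non-zero, so tod_bad becomes TOD_RATECHANGED unless a DR resume
 * has just reset dtick_avg. Drift within a quarter of the reference
 * frequency leaves dtick_delta at 0 and is tolerated.
 */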
static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds). The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
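
/*
 * For reference, the decay constants above round down from:
 *	(1 - exp(-1/60))  * 8192 ~= 135.4
 *	(1 - exp(-1/300)) * 8192 ~=  27.3
 *	(1 - exp(-1/900)) * 8192 ~=   9.1
 * A stand-alone user-level check that reproduces the table (illustrative
 * sketch only, not kernel code):
 *
 *	#include <stdio.h>
 *	#include <math.h>
 *
 *	int
 *	main(void)
 *	{
 *		int secs[3] = { 60, 300, 900 };
 *		int i;
 *
 *		for (i = 0; i < 3; i++)
 *			printf("%d\n",
 *			    (int)((1.0 - exp(-1.0 / secs[i])) * 8192.0));
 *		return (0);
 *	}
 */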
/*
 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
 * calculate the value of lbolt according to the current mode. In the event
 * driven mode (the default), lbolt is calculated by dividing the current hires
 * time by the number of nanoseconds per clock tick. In the cyclic driven mode
 * an internal variable is incremented at each firing of the lbolt cyclic
 * and returned by lbolt_cyclic_driven().
 *
 * The system will transition from event to cyclic driven mode when the number
 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
 * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
 * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
 * causing enough activity to cross the thresholds.
 */
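
/*
 * Consumer-side sketch (illustrative only; work_done() below is a
 * hypothetical driver routine, not part of this file): a driver polling
 * on lbolt, e.g.
 *
 *	clock_t deadline = ddi_get_lbolt() + drv_usectohz(500000);
 *
 *	while (!work_done() && ddi_get_lbolt() < deadline)
 *		delay(1);
 *
 * pays the gethrtime()-based cost of lbolt_event_driven() only until its
 * call rate crosses the per-CPU threshold; after that the lbolt cyclic is
 * armed and each read becomes a simple fetch via lbolt_cyclic_driven().
 */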
int64_t
lbolt_bootstrap(void)
{
	return (0);
}

/* ARGSUSED */
uint_t
lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
{
	hrtime_t ts, exp;
	int ret;

	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);

	kpreempt_disable();

	ts = gethrtime();
	lb_info->lbi_internal = (ts/nsec_per_tick);

	/*
	 * Align the next expiration to a clock tick boundary.
	 */
	exp = ts + nsec_per_tick - 1;
	exp = (exp/nsec_per_tick) * nsec_per_tick;

	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
	ASSERT(ret);

	lbolt_hybrid = lbolt_cyclic_driven;
	lb_info->lbi_cyc_deactivate = B_FALSE;
	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;

	kpreempt_enable();

	ret = atomic_dec_32_nv(&lb_info->lbi_token);
	ASSERT(ret == 0);

	return (1);
}
int64_t
lbolt_event_driven(void)
{
	hrtime_t ts;
	int64_t lb;
	int ret, cpu = CPU->cpu_seqid;

	ts = gethrtime();
	ASSERT(ts > 0);

	ASSERT(nsec_per_tick > 0);
	lb = (ts/nsec_per_tick);

	/*
	 * Switch to cyclic mode if the number of calls to this routine
	 * has reached the threshold within the interval.
	 */
	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {

		if (--lb_cpu[cpu].lbc_counter == 0) {
			/*
			 * Reached the threshold within the interval, reset
			 * the usage statistics.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;

			/*
			 * Make sure only one thread reprograms the
			 * lbolt cyclic and changes the mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_cyclic_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
				} else {
					lbolt_softint_post();
				}
			}
		}
	} else {
		/*
		 * Exceeded the interval, reset the usage statistics.
		 */
		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
		lb_cpu[cpu].lbc_cnt_start = lb;
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
int64_t
lbolt_cyclic_driven(void)
{
	int64_t lb = lb_info->lbi_internal;
	int cpu;

	/*
	 * If a CPU has already prevented the lbolt cyclic from deactivating
	 * itself, don't bother tracking the usage. Otherwise check if we're
	 * within the interval and how the per CPU counter is doing.
	 */
	if (lb_info->lbi_cyc_deactivate) {
		cpu = CPU->cpu_seqid;
		if ((lb - lb_cpu[cpu].lbc_cnt_start) <
		    lb_info->lbi_thresh_interval) {

			if (lb_cpu[cpu].lbc_counter == 0)
				/*
				 * Reached the threshold within the interval,
				 * prevent the lbolt cyclic from turning itself
				 * off.
				 */
				lb_info->lbi_cyc_deactivate = B_FALSE;
			else
				lb_cpu[cpu].lbc_counter--;
		} else {
			/*
			 * Only reset the usage statistics when we have
			 * exceeded the interval.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;
		}
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
/*
 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
 * It is inactive by default, and will be activated when switching from event
 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
 * by lbolt_cyclic_driven().
 */
static void
lbolt_cyclic(void)
{
	int ret;

	lb_info->lbi_internal++;

	if (!lbolt_cyc_only) {

		if (lb_info->lbi_cyc_deactivate) {
			/*
			 * Switching from cyclic to event driven mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_event_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
					return;
				}

				kpreempt_disable();

				lbolt_hybrid = lbolt_event_driven;
				ret = cyclic_reprogram(
				    lb_info->id.lbi_cyclic_id,
				    CY_INFINITY);
				ASSERT(ret);

				kpreempt_enable();

				ret = atomic_dec_32_nv(&lb_info->lbi_token);
				ASSERT(ret == 0);
			}
		}

		/*
		 * The lbolt cyclic should not try to deactivate itself before
		 * the sampling period has elapsed.
		 */
		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
		    lb_info->lbi_thresh_interval) {
			lb_info->lbi_cyc_deactivate = B_TRUE;
			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
		}
	}
}
/*
 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
 * when the system drops into the kernel debugger. lbolt_debug_entry() is
 * called by the KDI system claim callbacks to record a hires timestamp at
 * debug enter time. lbolt_debug_return() is called by the system release
 * callbacks to account for the time spent in the debugger. The value is then
 * accumulated in the lb_info structure and used by lbolt_event_driven() and
 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
 */
void
lbolt_debug_entry(void)
{
	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		lb_info->lbi_debug_ts = gethrtime();
	}
}
/*
 * Calculate the time spent in the debugger and add it to the lbolt info
 * structure. We also update the internal lbolt value in case we were in
 * cyclic driven mode going in.
 */
void
lbolt_debug_return(void)
{
	hrtime_t ts;

	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		ASSERT(nsec_per_tick > 0);

		ts = gethrtime();
		lb_info->lbi_internal = (ts/nsec_per_tick);
		lb_info->lbi_debug_time +=
		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);

		lb_info->lbi_debug_ts = 0;
	}
}
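
/*
 * Worked example (illustrative numbers only, assuming the traditional
 * hz = 100, i.e. nsec_per_tick of 10ms): if the system sits in the kernel
 * debugger for 5 seconds, lbolt_debug_return() adds
 *
 *	(5 * NANOSEC) / nsec_per_tick = 500
 *
 * ticks to lbi_debug_time, which lbolt_event_driven() and
 * lbolt_cyclic_driven() subtract from the raw tick count. As a result,
 * lbolt appears to stand still while the debugger had the system stopped
 * rather than jumping forward on return.
 */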