/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/callo.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/ddi_periodic.h>
#include <sys/random.h>
#include <sys/modctl.h>
#include <sys/timex.h>
#include <sys/inttypes.h>
#include <sys/sunddi.h>
#include <sys/clock_impl.h>
/*
 * clock() is called straight from the clock cyclic; see clock_init().
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t		vminfo;
extern int		idleswtch;	/* flag set while idle in pswtch() */
extern hrtime_t volatile devinfo_freeze;

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;		/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;		/* time offset (us) */
int32_t time_constant = 0;		/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;		/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */
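
/*
 * Illustrative sketch (hedged; not part of this file): a userland
 * daemon can observe the state kept in the variables above through
 * the ntp_adjtime(2) interface. A read-only query looks like:
 *
 *	#include <sys/timex.h>
 *
 *	struct timex tx;
 *	tx.modes = 0;
 *	int state = ntp_adjtime(&tx);
 *
 * On return, state holds time_state (e.g. TIME_OK) and tx.offset,
 * tx.freq, tx.maxerror and tx.esterror mirror the kernel variables.
 */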
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * the same filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by the same filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */
/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */
/*
 * Hybrid lbolt implementation:
 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
 * original symbols removed from the system. The once clock driven variables
 * are now implemented in an event driven fashion, backed by gethrtime()
 * coarsened to the appropriate clock resolution. The default event driven
 * implementation is complemented by a cyclic driven one, active only during
 * periods of intense activity around the DDI lbolt routines, when a lbolt
 * specific cyclic is reprogrammed to fire at a clock tick interval to serve
 * consumers of lbolt who rely on the original low cost of consulting a
 * memory position.
 *
 * The implementation uses the number of calls to these routines and the
 * frequency of these to determine when to transition from event to cyclic
 * driven and vice-versa. These values are kept on a per CPU basis for
 * scalability reasons and to prevent CPUs from constantly invalidating a
 * single cache line when modifying a global variable. The transition from
 * event to cyclic mode happens once the thresholds are crossed, and activity
 * on any CPU can cause such transition.
 *
 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 * lbolt_cyclic_driven() according to the current mode. When the thresholds
 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 * fire at a nsec_per_tick interval and increment an internal variable at
 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of such variable. lbolt_cyclic() will attempt
 * to shut itself off at each threshold interval (sampling period for calls
 * to the DDI lbolt routines), and return to the event driven mode, but will
 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
 * for the cyclic subsystem to be initialized.
 */
int64_t lbolt_bootstrap(void);
int64_t lbolt_event_driven(void);
int64_t lbolt_cyclic_driven(void);
int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);

/*
 * lbolt's cyclic, installed by clock_init().
 */
static void lbolt_cyclic(void);

/*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven, once it reaches cyclic mode.
 */
static boolean_t lbolt_cyc_only = B_FALSE;

/*
 * Cache aligned, per CPU structure with lbolt usage statistics.
 */
static lbolt_cpu_t *lb_cpu;

/*
 * Single, cache aligned, structure with all the information required by
 * the lbolt implementation.
 */
lbolt_info_t *lb_info;
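
/*
 * Hedged usage sketch: the consumers that drive the event/cyclic
 * transition described above are simply drivers polling lbolt, e.g.:
 *
 *	clock_t start = ddi_get_lbolt();
 *
 *	while (!done && (ddi_get_lbolt() - start) < drv_usectohz(1000000))
 *		delay(drv_usectohz(10000));
 *
 * A tight loop of such calls is what pushes lbolt_hybrid into
 * lbolt_cyclic_driven() mode.
 */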
int	one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

extern void	clock_tick_schedule(int);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static enum tod_fault_type tod_faulted = TOD_NOFAULT;

static int tod_status_flag = 0;		/* used by tod_validate() */

static hrtime_t prev_set_tick = 0;	/* gethrtime() prior to tod_set() */
static time_t prev_set_tod = 0;		/* tv_sec value passed to tod_set() */

/* patchable via /etc/system */
int tod_validate_enable = 1;
/* Diagnose/Limit messages about delay(9F) called from interrupt context */
int delay_from_interrupt_diagnose = 0;
volatile uint32_t delay_from_interrupt_msg = 20;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};
/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;
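
/*
 * Hedged example: the hook above is typically driven from a debugger;
 * with mdb(1) one might inject a 100 second tod jump as follows:
 *
 *	# mdb -kw
 *	> tod_test_injector/Z 0t100
 *	> tod_unit_test/W 1
 */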
#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void	calcloadavg(int, uint64_t *);
static int	genloadavg(struct loadavg_s *);
static void	loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

static int64_t deadman_counter = 0;

extern void	set_freemem();

	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */
	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */
	set_freemem();

	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment executed every timer interrupt.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;

		timedelta -= ltemp * (NANOSEC/MICROSEC);

	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;

		timedelta += ltemp * (NANOSEC/MICROSEC);
	}
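
	/*
	 * Illustrative arithmetic (assuming only that SCALE_PHASE is the
	 * fixed-point scale factor from sys/timex.h): if time_phase has
	 * accumulated 3 * SCALE_PHASE, then ltemp is 3, three whole
	 * scaled microseconds are carried out of time_phase, and
	 * timedelta grows by 3 * 1000 ns, spreading the NTP phase
	 * adjustment across clock ticks.
	 */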
	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 *
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */

	/*
	 * keep track of when to update lgrp/part loads
	 */
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
	}

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */
	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable =
		    cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;

		cpupart->cp_nrunning = 0;
		cpupart->cp_nrunnable = cpupart_nrunnable;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);
	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;

		cpupart->cp_nrunnable += cpu_nrunnable;

		/*
		 * Update user, system, and idle cpu times.
		 */
		cpupart->cp_nrunning++;

		/*
		 * w_io is used to update sysinfo.waiting during
		 * one_second processing below.  Only gather w_io
		 * information when we walk the list of cpus if we're
		 * going to perform one_second processing.
		 */
		w_io += CPU_STATS(cp, sys.iowait);
		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload, cpu_t *, cp,
			    hrtime_t, intracct, hrtime_t, intrused);
		}
		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	clock_tick_schedule(one_sec);
	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wakeup the cageout thread waiters once per second.
	 */
	/*
	 * Beginning of precision-kernel code fragment executed
	 * every second.
	 *
	 * On rollover of the second the phase adjustment to be
	 * used for the next second is calculated.  Also, the
	 * maximum error is increased by the tolerance.  If the
	 * PPS frequency discipline code is present, the phase is
	 * increased to compensate for the CPU clock oscillator
	 * frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms
	 * and maximum frequency offset is (a tad less than)
	 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
	 */
	time_maxerror += time_tolerance / SCALE_USEC;

	/*
	 * Leap second processing.  If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second.  The microtime() routine or
	 * external clock driver will insure that reported time
	 * is always monotonic.  The ugly divides should be
	 * replaced.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (hrestime.tv_sec % 86400 == 0) {
			hrestime.tv_sec--;
			time_state = TIME_OOP;
		}
		break;

	case TIME_DEL:
		if ((hrestime.tv_sec + 1) % 86400 == 0) {
			hrestime.tv_sec++;
			time_state = TIME_WAIT;
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}
	/*
	 * Compute the phase adjustment for the next second.  In
	 * PLL mode, the offset is reduced by a fixed factor
	 * times the time constant.  In FLL mode the offset is
	 * used directly.  In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread
	 * the adjustment over not more than the number of
	 * seconds between updates.
	 */
	if (time_offset == 0)
		time_adj = 0;
	else if (time_offset < 0) {
		lltemp = -time_offset;
		if (!(time_status & STA_FLL)) {
			if ((1 << time_constant) >= SCALE_KG)
				lltemp *= (1 << time_constant) /
				    SCALE_KG;
			else
				lltemp = (lltemp / SCALE_KG) >>
				    time_constant;
		}
		if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
			lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
		time_offset += lltemp;
		time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
	} else {
		lltemp = time_offset;
		if (!(time_status & STA_FLL)) {
			if ((1 << time_constant) >= SCALE_KG)
				lltemp *= (1 << time_constant) /
				    SCALE_KG;
			else
				lltemp = (lltemp / SCALE_KG) >>
				    time_constant;
		}
		if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
			lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
		time_offset -= lltemp;
		time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
	}
	/*
	 * Compute the frequency estimate and additional phase
	 * adjustment due to frequency error for the next
	 * second. When the PPS signal is engaged, gnaw on the
	 * watchdog counter and update the frequency computed by
	 * the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
		    STA_PPSWANDER | STA_PPSERROR);
	}
	lltemp = time_freq + pps_freq;

	time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
	/*
	 * End of precision kernel-code fragment
	 *
	 * The section below should be modified if we are planning
	 * to use NTP for synchronization.
	 *
	 * Note: the clock synchronization code now assumes
	 * the following:
	 * - if dosynctodr is 1, then compute the drift between
	 *   the tod chip and software time and adjust one or
	 *   the other depending on the circumstances
	 *
	 * - if dosynctodr is 0, then the tod chip is independent
	 *   of the software clock and should not be adjusted,
	 *   but allowed to free run.  this allows NTP to sync.
	 *   hrestime without any interference from the tod chip.
	 */
	tod_validate_deferred = B_FALSE;
	mutex_enter(&tod_lock);
	tod = tod_get();
	drift = tod.tv_sec - hrestime.tv_sec;
	absdrift = (drift >= 0) ? drift : -drift;
	if (tod_needsync || absdrift > 1) {
		if (!tod_broken && tod_faulted == TOD_NOFAULT) {
			membar_enter();	/* hrestime visible */
			if (tod_needsync || !dosynctodr) {
				tod_needsync = 0;
				tod_set(hrestime);
			} else {
				/*
				 * If the drift is 2 seconds on the
				 * money, then the TOD is adjusting
				 * the clock;  record that.
				 */
				clock_adj_hist[adj_hist_entry++ %
				    CLOCK_ADJ_HIST_SIZE] = now;
				timedelta = (int64_t)drift*NANOSEC;
			}
		}
	}
	time = gethrestime_sec();  /* for crusty old kmem readers */
	mutex_exit(&tod_lock);

	/*
	 * Some drivers still depend on this... XXX
	 */
	cv_broadcast(&lbolt_cv);
	vminfo.freemem += freemem;
	{
		pgcnt_t maxswap, resv, free;
		pgcnt_t avail =
		    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

		maxswap = k_anoninfo.ani_mem_resv +
		    k_anoninfo.ani_max + avail;
		/* Update ani_free */
		set_anoninfo();
		free = k_anoninfo.ani_free + avail;
		resv = k_anoninfo.ani_phys_resv +
		    k_anoninfo.ani_mem_resv;

		vminfo.swap_resv += resv;
		/* number of reserved and allocated pages */
		if (maxswap < free)
			cmn_err(CE_WARN, "clock: maxswap < free");
		if (maxswap < resv)
			cmn_err(CE_WARN, "clock: maxswap < resv");

		vminfo.swap_alloc += maxswap - free;
		vminfo.swap_avail += maxswap - resv;
		vminfo.swap_free += free;
	}
	sysinfo.runque += nrunnable;
	sysinfo.swpque += nswapped;
	sysinfo.waiting += w_io;
	/*
	 * Wake up fsflush to write out DELWRI
	 * buffers, dirty pages and other cached
	 * administrative data, e.g. inodes.
	 */
	if (--fsflushcnt <= 0) {
		fsflushcnt = tune.t_fsflushr;
		cv_signal(&fsflush_cv);
	}

	calcloadavg(genloadavg(&loadavg), hp_avenrun);
	for (i = 0; i < 3; i++)
		/*
		 * At the moment avenrun[] can only hold 31
		 * bits of load average as it is a signed
		 * int in the API. We need to ensure that
		 * hp_avenrun[i] >> (16 - FSHIFT) will not be
		 * too large. If it is, we put the largest value
		 * that we can use into avenrun[i]. This is
		 * kludgey, but about all we can do until
		 * avenrun[] is declared as an array of uint64[]
		 * values.
		 */
		if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
			avenrun[i] = (int32_t)(hp_avenrun[i] >>
			    (16 - FSHIFT));
		else
			avenrun[i] = 0x7fffffff;
	cpupart = cp_list_head;
	do {
		calcloadavg(genloadavg(&cpupart->cp_loadavg),
		    cpupart->cp_hp_avenrun);
	} while ((cpupart = cpupart->cp_next) != cp_list_head);
	/*
	 * Wake up the swapper thread if necessary.
	 */
	if (runin ||
	    (runout && (avefree < desfree || wake_sched_sec))) {
		if (t->t_state == TS_STOPPED) {
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (t->t_state == TS_STOPPED) {
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);
	}
void
clock_init(void)
{
	cyc_handler_t clk_hdlr, lbolt_hdlr;
	cyc_time_t clk_when, lbolt_when;
	int i, sz;
	intptr_t buf;

	/*
	 * Setup handler and timer for the clock cyclic.
	 */
	clk_hdlr.cyh_func = (cyc_func_t)clock;
	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
	clk_hdlr.cyh_arg = NULL;

	clk_when.cyt_when = 0;
	clk_when.cyt_interval = nsec_per_tick;
	/*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
	 * interval to satisfy performance needs of the DDI lbolt consumers.
	 * It is off by default.
	 */
	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
	lbolt_hdlr.cyh_arg = NULL;

	lbolt_when.cyt_interval = nsec_per_tick;
	/*
	 * Allocate cache line aligned space for the per CPU lbolt data and
	 * lbolt info structures, and initialize them with their default
	 * values. Note that these structures are also cache line sized.
	 */
	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    (HZ_DEFAULT / hz);
	else
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;

	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;

	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	for (i = 0; i < max_ncpus; i++)
		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
	/*
	 * Install the softint used to switch between event and cyclic driven
	 * lbolt. We use a soft interrupt to make sure the context of the
	 * cyclic reprogram call is safe.
	 */
	lbolt_softint_add();

	/*
	 * Since the hybrid lbolt implementation is based on a hardware counter
	 * that is reset at every hardware reboot and that we'd like to have
	 * the lbolt value starting at zero after both a hardware and a fast
	 * reboot, we calculate the number of clock ticks the system's been up
	 * and store it in the lbi_debug_time field of the lbolt info structure.
	 * The value of this field will be subtracted from lbolt before
	 * returning it.
	 */
	lb_info->lbi_internal = lb_info->lbi_debug_time =
	    (gethrtime()/nsec_per_tick);
	/*
	 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
	 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed. Setting lbolt_hybrid
	 * to either lbolt_{cyclic,event}_driven here signals those code paths
	 * that the lbolt related structures can be used.
	 */
	if (lbolt_cyc_only) {
		lbolt_when.cyt_when = 0;
		lbolt_hybrid = lbolt_cyclic_driven;
	} else {
		lbolt_when.cyt_when = CY_INFINITY;
		lbolt_hybrid = lbolt_event_driven;
	}
	/*
	 * Grab cpu_lock and install all three cyclics.
	 */
	mutex_enter(&cpu_lock);

	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);

	mutex_exit(&cpu_lock);
}
/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */
int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos;	/* starting position */
	int cpos;	/* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}

	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}
/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */
static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */
	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);
.lg_loads
[loadavg
.lg_cur
] = loadavg
.lg_total
;
1132 loadavg
.lg_cur
= (loadavg
.lg_cur
+ 1) % S_LOADAVG_SZ
;
1133 loadavg
.lg_len
= (loadavg
.lg_len
+ 1) < S_LOADAVG_SZ
?
1134 loadavg
.lg_len
+ 1 : S_LOADAVG_SZ
;
1136 * Second pass updates counts
1138 cpupart
= cp_list_head
;
1141 struct loadavg_s
*lavg
;
1143 lavg
= &cpupart
->cp_loadavg
;
1144 lavg
->lg_loads
[lavg
->lg_cur
] = lavg
->lg_total
;
1146 lavg
->lg_cur
= (lavg
->lg_cur
+ 1) % S_LOADAVG_SZ
;
1147 lavg
->lg_len
= (lavg
->lg_len
+ 1) < S_LOADAVG_SZ
?
1148 lavg
->lg_len
+ 1 : S_LOADAVG_SZ
;
1150 } while ((cpupart
= cpupart
->cp_next
) != cp_list_head
);
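
/*
 * Illustrative sketch (hedged; not part of this file): the averages
 * maintained here surface in userland through getloadavg(3C), e.g.:
 *
 *	#include <sys/loadavg.h>
 *	#include <stdio.h>
 *
 *	double avg[LOADAVG_NSTATS];
 *
 *	if (getloadavg(avg, LOADAVG_NSTATS) != -1)
 *		(void) printf("%.2f %.2f %.2f\n", avg[LOADAVG_1MIN],
 *		    avg[LOADAVG_5MIN], avg[LOADAVG_15MIN]);
 */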
/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
 * routine computes new time and frequency offset estimates for each
 * call. The PPS signal itself determines the new time offset,
 * instead of the calling argument. Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP). If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}
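
/*
 * A minimal illustrative sketch (hypothetical helper, not called by
 * the code below; ddi_hardpps() open-codes this logic twice instead):
 * select the median of the last three filter samples and report the
 * spread of the other two samples as the dispersion estimate.
 */
static int32_t
median3(const int32_t f[3], int32_t *dispersion)
{
	int32_t med;

	if (f[0] > f[1]) {
		if (f[1] > f[2]) {
			med = f[1];			/* 0 1 2 */
			*dispersion = f[0] - f[2];
		} else if (f[2] > f[0]) {
			med = f[0];			/* 2 0 1 */
			*dispersion = f[2] - f[1];
		} else {
			med = f[2];			/* 0 2 1 */
			*dispersion = f[0] - f[1];
		}
	} else {
		if (f[1] < f[2]) {
			med = f[1];			/* 2 1 0 */
			*dispersion = f[2] - f[0];
		} else if (f[2] < f[0]) {
			med = f[0];			/* 1 0 2 */
			*dispersion = f[1] - f[2];
		} else {
			med = f[2];			/* 1 2 0 */
			*dispersion = f[1] - f[0];
		}
	}
	return (med);
}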
/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since
 * the last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 * usec_per_tick. This is called from the serial driver cdintr(),
 * or equivalent, at a high PIL.  Because the kernel keeps a
 * high-resolution time, the following code can accept either
 * the traditional argument pair, or the current highres timestamp
 * in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;
	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;
	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
= usec
* SCALE_USEC
;
1367 v_usec
= pps_usec
- u_usec
;
1368 if (v_usec
>= bigtick
>> 1)
1370 if (v_usec
< -(bigtick
>> 1))
1373 v_usec
= -(-v_usec
>> pps_shift
);
1375 v_usec
= v_usec
>> pps_shift
;
1377 cal_sec
= tvp
->tv_sec
;
1378 cal_usec
= tvp
->tv_usec
;
1379 cal_sec
-= pps_time
.tv_sec
;
1380 cal_usec
-= pps_time
.tv_usec
;
1382 cal_usec
+= MICROSEC
;
	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}
	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}
	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}
	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}
1501 * Check for timer action, enforce CPU rlimit, do profiling etc.
1504 clock_tick(kthread_t
*t
, int pending
)
1510 int poke
= 0; /* notify another CPU */
1513 int i
, total_usec
, usec
;
1516 ASSERT(pending
> 0);
1518 /* Must be operating on a lwp/thread */
1519 if ((lwp
= ttolwp(t
)) == NULL
) {
1520 panic("clock_tick: no lwp");
1524 for (i
= 0; i
< pending
; i
++) {
1525 CL_TICK(t
); /* Class specific tick processing */
1526 DTRACE_SCHED1(tick
, kthread_t
*, t
);
	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	ticks = (pp->p_utime + pp->p_stime) % hz;
	/*
	 * Update process times. Should use high res clock and state
	 * changes instead of statistical sampling method. XXX
	 */
	if (user_mode)
		pp->p_utime += pending;
	else
		pp->p_stime += pending;

	pp->p_ttime += pending;
	/*
	 * Update user profiling statistics. Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}
	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer. The value passed to itimerdecr() has to be
	 * in microseconds and has to be less than one second. Hence
	 * this loop.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (user_mode &&
		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGVTALRM);
		}
		total_usec -= usec;
	}
	/*
	 * If CPU was in user state, process lwp-profile
	 * interval timer.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGPROF);
		}
		total_usec -= usec;
	}
	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 *
	 * Perform the check only if we have accumulated more than a second.
	 */
	if ((ticks + pending) >= hz) {
		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
	}
	/*
	 *   (b) task.max-cpu-time resource control
	 *
	 * If we have accumulated enough ticks, increment the task CPU
	 * time usage and test for the resource limit. This minimizes the
	 * number of calls to the rctl_test(). The task CPU time mutex
	 * is highly contentious as many processes can be sharing a task.
	 */
	if (pp->p_ttime >= clock_tick_proc_max) {
		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
		pp->p_ttime = 0;
		if (secs) {
			(void) rctl_test(rc_task_cpu_time,
			    pp->p_task->tk_rctls,
			    pp, secs, RCA_UNSAFE_SIGINFO);
		}
	}
	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(pp->p_as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;
	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}
void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);
	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling.  pr_scale is a 16.16 fixed-point
		 * ratio, so the two-step computation of byteoff below is
		 * (delta * pr_scale) >> 16 evaluated without overflowing;
		 * a scale of 0x10000 maps pc offsets 1:1 onto the buffer.
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		model_t model = lwp_getdatamodel(lwp);
		int result;

		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;

			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;

			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0)
				break;

			pr->pr_base = (caddr_t)pr->pr_base +
			    SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}
static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}
/*
 * The delay(9F) man page indicates that it can only be called from user or
 * kernel context - detect and diagnose bad calls. The following macro will
 * produce a limited number of messages identifying bad callers.  This is done
 * in a macro so that caller() is meaningful. When a bad caller is identified,
 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
 */
#define	DELAY_CONTEXT_CHECK()	{					\
	uint32_t	m;						\
	char		*f;						\
	ulong_t		off;						\
									\
	m = delay_from_interrupt_msg;					\
	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
	    !panicstr && !devinfo_freeze &&				\
	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
		f = modgetsymname((uintptr_t)caller(), &off);		\
		cmn_err(CE_WARN, "delay(9F) called from "		\
		    "interrupt context: %s`%s",				\
		    mod_containing_pc(caller()), f ? f : "...");	\
	}								\
}
/*
 * delay_common: common delay code.
 */
static void
delay_common(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline;
	clock_t timeleft;
	callout_id_t id;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert delay(9F) call into drv_usecwait(9F) call. */
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	deadline = ddi_get_lbolt() + ticks;
	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout_default(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout_default(id, 0);
	}
}
1779 delay(clock_t ticks
)
1781 DELAY_CONTEXT_CHECK();
1783 delay_common(ticks
);
1787 * Delay a random number of clock ticks between 1 and ticks.
1790 delay_random(clock_t ticks
)
1794 DELAY_CONTEXT_CHECK();
1796 (void) random_get_pseudo_bytes((void *)&r
, sizeof (r
));
1799 ticks
= (r
% ticks
) + 1;
1800 delay_common(ticks
);
/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline;
	clock_t rc;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return (0);
	}

	deadline = ddi_get_lbolt() + ticks;
	mutex_enter(&t->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&t->t_delay_cv,
		    &t->t_delay_lock, deadline);
		/* loop until past deadline or signaled */
	} while (rc > 0);
	mutex_exit(&t->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}
#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, set it to the
		 * greater of i) approx and ii) 1987. That way if approx
		 * is negative or is earlier than 1987, we set the clock
		 * back to a time when Oliver North, ALF and Dire Straits
		 * were all on the collective brain: 1987.
		 */
		timestruc_t tmp;
		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl high
		 * to avoid getting preempted between the tod_set and tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
	}

	boot_time = ts.tv_sec;

	set_hrestime(&ts);

	mutex_exit(&tod_lock);
}
; /* for testing if the system time has been reset */
1911 set_hrestime(timestruc_t
*ts
)
1913 int spl
= hr_clock_lock();
1915 membar_enter(); /* hrestime must be visible before timechanged++ */
1918 hr_clock_unlock(spl
);
static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;
static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (deadman_counter != CPU->cpu_deadman_counter) {
		CPU->cpu_deadman_counter = deadman_counter;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (--CPU->cpu_deadman_countdown > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}
/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_counter = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}
void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
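
/*
 * Hedged example (tunable names as declared above; whether each is
 * settable this way depends on the build): arming the deadman on a
 * non-DEBUG kernel is commonly done from /etc/system, e.g.:
 *
 *	set snooping=1
 *	set deadman_seconds=45
 */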
/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *	currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *	TOD_REVERSED: current tod value is less than previous value.
 *	TOD_STALLED: current tod value hasn't advanced.
 *	TOD_JUMPED: current tod value advanced too far from previous value.
 *	TOD_RATECHANGED: the ratio between average tod delta and
 *	average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
 *	a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}
/*
 * Two functions that allow tod_status_flag to be manipulated by functions
 * external to this file.
 */
void
tod_status_set(int tod_flag)
{
	tod_status_flag |= tod_flag;
}

void
tod_status_clear(int tod_flag)
{
	tod_status_flag &= ~tod_flag;
}
/*
 * Record a timestamp and the value passed to tod_set(). The next call to
 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
 * when checking the timestruc_t returned by tod_get(). Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task but these
 * become obsolete, and will be re-assigned with the prev_set_* values,
 * in the case when the TOD is re-written.
 */
void
tod_set_prev(timestruc_t ts)
{
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return;
	}

	prev_set_tick = gethrtime();
	/*
	 * A negative value will be set to zero in utc_to_tod() so we fake
	 * a zero here in such a case. This would need to change if the
	 * behavior of utc_to_tod() changes.
	 */
	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
}
/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *	TOD_REVERSED: current tod value is less than previous.
 *	TOD_STALLED: current tod value hasn't advanced.
 *	TOD_JUMPED: current tod value advanced too far from previous value.
 *	TOD_RATECHANGED: the ratio between average tod delta and
 *	average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;
	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	int cpr_resume_done = 0;
	int dr_resume_done = 0;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));
2183 * tod_validate_enable is patchable via /etc/system.
2184 * If TOD is already faulted, or if TOD validation is deferred,
2185 * there is nothing to do.
2187 if ((tod_validate_enable
== 0) || (tod_faulted
!= TOD_NOFAULT
) ||
2188 tod_validate_deferred
) {
2193 * If this is the first time through, we just need to save the tod
2194 * we were called with and hrtime so we can use them next time to
2195 * validate tod_get().
2205 * Handle any flags that have been turned on by tod_status_set().
2206 * In the case where a tod_set() is done and then a subsequent
2207 * tod_get() fails (ie, both TOD_SET_DONE and TOD_GET_FAILED are
2208 * true), we treat the TOD_GET_FAILED with precedence by switching
2209 * off the flag, returning tod and leaving TOD_SET_DONE asserted
2210 * until such time as tod_get() completes successfully.
2212 if (tod_status_flag
& TOD_GET_FAILED
) {
2214 * tod_get() has encountered an issue, possibly transitory,
2215 * when reading TOD. We'll just return the incoming tod
2216 * value (which is actually hrestime.tv_sec in this case)
2217 * and when we get a genuine tod, following a successful
2218 * tod_get(), we can validate using prev_tod and prev_tick.
2220 tod_status_flag
&= ~TOD_GET_FAILED
;
2222 } else if (tod_status_flag
& TOD_SET_DONE
) {
2224 * TOD has been modified. Just before the TOD was written,
2225 * tod_set_prev() saved tod and hrtime; we can now use
2226 * those values, prev_set_tod and prev_set_tick, to validate
2227 * the incoming tod that's just been read.
2229 prev_tod
= prev_set_tod
;
2230 prev_tick
= prev_set_tick
;
2231 dtick_avg
= TOD_REF_FREQ
;
2232 tod_status_flag
&= ~TOD_SET_DONE
;
2234 * If a tod_set() preceded a cpr_suspend() without an
2235 * intervening tod_validate(), we need to ensure that a
2236 * TOD_JUMPED condition is ignored.
2237 * Note this isn't a concern in the case of DR as we've
2238 * just reassigned dtick_avg, above.
2240 if (tod_status_flag
& TOD_CPR_RESUME_DONE
) {
2241 cpr_resume_done
= 1;
2242 tod_status_flag
&= ~TOD_CPR_RESUME_DONE
;
2244 } else if (tod_status_flag
& TOD_CPR_RESUME_DONE
) {
2246 * The system's coming back from a checkpoint resume.
2248 cpr_resume_done
= 1;
2249 tod_status_flag
&= ~TOD_CPR_RESUME_DONE
;
2251 * We need to handle the possibility of a CPR suspend
2252 * operation having been initiated whilst a DR event was
2255 if (tod_status_flag
& TOD_DR_RESUME_DONE
) {
2257 tod_status_flag
&= ~TOD_DR_RESUME_DONE
;
2259 } else if (tod_status_flag
& TOD_DR_RESUME_DONE
) {
2261 * A Dynamic Reconfiguration event has taken place.
2264 tod_status_flag
&= ~TOD_DR_RESUME_DONE
;
2268 switch (tod_unit_test
) {
2269 case 1: /* for testing jumping tod */
2270 tod
+= tod_test_injector
;
2273 case 2: /* for testing stuck tod bit */
2274 tod
|= 1 << tod_test_injector
;
2277 case 3: /* for testing stalled tod */
2281 case 4: /* reset tod fault status */
2282 (void) tod_fault(TOD_NOFAULT
, 0);
2289 diff_tod
= tod
- prev_tod
;
2290 diff_tick
= tick
- prev_tick
;
2292 ASSERT(diff_tick
>= 0);
2295 /* ERROR - tod reversed */
2296 tod_bad
= TOD_REVERSED
;
2297 off
= (int)(prev_tod
- tod
);
2298 } else if (diff_tod
== 0) {
2299 /* tod did not advance */
2300 if (diff_tick
> TOD_STALL_THRESHOLD
) {
2301 /* ERROR - tod stalled */
2302 tod_bad
= TOD_STALLED
;
2305 * Make sure we don't update prev_tick
2306 * so that diff_tick is calculated since
2307 * the first diff_tod == 0
2312 /* calculate dtick */
2313 dtick
= diff_tick
/ diff_tod
;
2315 /* update dtick averages */
2316 dtick_avg
+= ((dtick
- dtick_avg
) / TOD_FILTER_N
);
2319 * Calculate dtick_delta as
2320 * variation from reference freq in quartiles
2322 dtick_delta
= (dtick_avg
- TOD_REF_FREQ
) /
2323 (TOD_REF_FREQ
>> 2);
2326 * Even with a perfectly functioning TOD device,
2327 * when the number of elapsed seconds is low the
2328 * algorithm can calculate a rate that is beyond
2329 * tolerance, causing an error. The algorithm is
2330 * inaccurate when elapsed time is low (less than
2334 if (dtick
< TOD_JUMP_THRESHOLD
) {
2336 * If we've just done a CPR resume, we detect
2337 * a jump in the TOD but, actually, what's
2338 * happened is that the TOD has been increasing
2339 * whilst the system was suspended and the tick
2340 * count hasn't kept up. We consider the first
2341 * occurrence of this after a resume as normal
2342 * and ignore it; otherwise, in a non-resume
2343 * case, we regard it as a TOD problem.
2345 if (!cpr_resume_done
) {
2346 /* ERROR - tod jumped */
2347 tod_bad
= TOD_JUMPED
;
2348 off
= (int)diff_tod
;
2353 * If we've just done a DR resume, dtick_avg
2354 * can go a bit askew so we reset it and carry
2355 * on; otherwise, the TOD is in error.
2357 if (dr_resume_done
) {
2358 dtick_avg
= TOD_REF_FREQ
;
2360 /* ERROR - change in clock rate */
2361 tod_bad
= TOD_RATECHANGED
;
2367 if (tod_bad
!= TOD_NOFAULT
) {
2368 (void) tod_fault(tod_bad
, off
);
2371 * Disable dosynctodr since we are going to fault
2372 * the TOD chip anyway here
2377 * Set tod to the correct value from hrestime
2379 tod
= hrestime
.tv_sec
;
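/*
 * Worked example of the rate check above, assuming TOD_REF_FREQ is the
 * number of hrtime nanoseconds per TOD second (i.e. NANOSEC): a healthy
 * clock yields dtick ~= 10^9, dtick_avg stays near TOD_REF_FREQ, and
 * dtick_delta is 0.  If the filtered rate drifts by more than a quarter
 * of the reference frequency -- e.g. dtick_avg = 1.3 * 10^9 gives
 * (0.3 * 10^9) / (0.25 * 10^9) == 1 -- dtick_delta becomes non-zero and
 * TOD_RATECHANGED is raised, unless a DR resume has just occurred.
 */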
static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds). The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
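/*
 * Quick arithmetic check of the constants above: exp(-1/60) ~= 0.98347,
 * so (1 - exp(-1/60)) * 2^13 ~= 0.01653 * 8192 ~= 135.4, which truncates
 * to 135.  Likewise (1 - exp(-1/300)) * 8192 ~= 27.3 -> 27, and
 * (1 - exp(-1/900)) * 8192 ~= 9.1 -> 9.
 */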
/*
 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
 * calculate the value of lbolt according to the current mode. In the event
 * driven mode (the default), lbolt is calculated by dividing the current hires
 * time by the number of nanoseconds per clock tick. In the cyclic driven mode
 * an internal variable is incremented at each firing of the lbolt cyclic
 * and returned by lbolt_cyclic_driven().
 *
 * The system will transition from event to cyclic driven mode when the number
 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
 * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
 * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
 * causing enough activity to cross the thresholds.
 */
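/*
 * For reference, consumers reach these routines through the lbolt_hybrid
 * function pointer; the DDI entry points reduce to roughly the following
 * (a simplification, not a copy of their implementation):
 *
 *	clock_t
 *	ddi_get_lbolt(void)
 *	{
 *		return ((clock_t)lbolt_hybrid());
 *	}
 *
 * lbolt_hybrid points at lbolt_event_driven() or lbolt_cyclic_driven()
 * according to the current mode.
 */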
int64_t
lbolt_bootstrap(void)
{
	return (0);
}
/* ARGSUSED */
uint_t
lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
{
	hrtime_t ts, exp;
	int ret;

	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);

	kpreempt_disable();

	ts = gethrtime();
	lb_info->lbi_internal = (ts/nsec_per_tick);

	/*
	 * Align the next expiration to a clock tick boundary.
	 */
	exp = ts + nsec_per_tick - 1;
	exp = (exp/nsec_per_tick) * nsec_per_tick;

	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
	ASSERT(ret);

	lbolt_hybrid = lbolt_cyclic_driven;
	lb_info->lbi_cyc_deactivate = B_FALSE;
	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;

	kpreempt_enable();

	ret = atomic_dec_32_nv(&lb_info->lbi_token);
	ASSERT(ret == 0);

	return (1);
}
int64_t
lbolt_event_driven(void)
{
	hrtime_t ts;
	int64_t lb;
	int ret, cpu = CPU->cpu_seqid;

	ts = gethrtime();
	ASSERT(nsec_per_tick > 0);
	lb = (ts/nsec_per_tick);

	/*
	 * Switch to cyclic mode if the number of calls to this routine
	 * has reached the threshold within the interval.
	 */
	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {

		if (--lb_cpu[cpu].lbc_counter == 0) {
			/*
			 * Reached the threshold within the interval, reset
			 * the usage statistics.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;

			/*
			 * Make sure only one thread reprograms the
			 * lbolt cyclic and changes the mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_cyclic_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
				} else {
					lbolt_softint_post();
				}
			}
		}
	} else {
		/*
		 * Exceeded the interval, reset the usage statistics.
		 */
		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
		lb_cpu[cpu].lbc_cnt_start = lb;
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
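/*
 * Worked example of the switch heuristic above, using hypothetical
 * tunings lbi_thresh_calls = 100 and lbi_thresh_interval = 100: a CPU
 * whose consumers call ddi_get_lbolt() 100 times before 100 ticks have
 * elapsed drives its lbc_counter to zero and posts the soft interrupt
 * that runs lbolt_ev_to_cyclic(); a slower call rate simply resets the
 * per-CPU statistics each interval and the system stays event driven.
 */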
int64_t
lbolt_cyclic_driven(void)
{
	int64_t lb = lb_info->lbi_internal;
	int cpu;

	/*
	 * If a CPU has already prevented the lbolt cyclic from deactivating
	 * itself, don't bother tracking the usage. Otherwise check if we're
	 * within the interval and how the per CPU counter is doing.
	 */
	if (lb_info->lbi_cyc_deactivate) {
		cpu = CPU->cpu_seqid;
		if ((lb - lb_cpu[cpu].lbc_cnt_start) <
		    lb_info->lbi_thresh_interval) {

			if (lb_cpu[cpu].lbc_counter == 0)
				/*
				 * Reached the threshold within the interval,
				 * prevent the lbolt cyclic from turning
				 * itself off.
				 */
				lb_info->lbi_cyc_deactivate = B_FALSE;
			else
				lb_cpu[cpu].lbc_counter--;
		} else {
			/*
			 * Only reset the usage statistics when we have
			 * exceeded the interval.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;
		}
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
/*
 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
 * It is inactive by default, and will be activated when switching from event
 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
 * by lbolt_cyclic_driven().
 */
static void
lbolt_cyclic(void)
{
	int ret;

	lb_info->lbi_internal++;

	if (!lbolt_cyc_only) {

		if (lb_info->lbi_cyc_deactivate) {
			/*
			 * Switching from cyclic to event driven mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_event_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
					return;
				}

				kpreempt_disable();

				lbolt_hybrid = lbolt_event_driven;
				ret = cyclic_reprogram(
				    lb_info->id.lbi_cyclic_id,
				    CY_INFINITY);
				ASSERT(ret);

				kpreempt_enable();

				ret = atomic_dec_32_nv(&lb_info->lbi_token);
				ASSERT(ret == 0);
			}
		}

		/*
		 * The lbolt cyclic should not try to deactivate itself before
		 * the sampling period has elapsed.
		 */
		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
		    lb_info->lbi_thresh_interval) {
			lb_info->lbi_cyc_deactivate = B_TRUE;
			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
		}
	}
}
/*
 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
 * when the system drops into the kernel debugger. lbolt_debug_entry() is
 * called by the KDI system claim callbacks to record a hires timestamp at
 * debug enter time. lbolt_debug_return() is called by the system release
 * callbacks to account for the time spent in the debugger. The value is then
 * accumulated in the lb_info structure and used by lbolt_event_driven() and
 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
 */
void
lbolt_debug_entry(void)
{
	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		lb_info->lbi_debug_ts = gethrtime();
	}
}
/*
 * Calculate the time spent in the debugger and add it to the lbolt info
 * structure. We also update the internal lbolt value in case we were in
 * cyclic driven mode going in.
 */
void
lbolt_debug_return(void)
{
	hrtime_t ts;

	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		ASSERT(nsec_per_tick > 0);

		ts = gethrtime();
		lb_info->lbi_internal = (ts/nsec_per_tick);
		lb_info->lbi_debug_time +=
		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);

		lb_info->lbi_debug_ts = 0;
	}
}
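/*
 * Example of the accounting above: if the system sat in the debugger for
 * 3 seconds with nsec_per_tick = 10,000,000 (a 100Hz tick), lbi_debug_time
 * grows by roughly 300 ticks.  Both lbolt_event_driven() and
 * lbolt_cyclic_driven() subtract lbi_debug_time, so lbolt appears to have
 * paused while in the debugger rather than jumping forward on return.
 */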