/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>
#include <sys/ddi_periodic.h>
#include <sys/random.h>
#include <sys/modctl.h>
#include <sys/zone.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

#include <sys/sunddi.h>
#include <sys/clock_impl.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */
extern hrtime_t volatile devinfo_freeze;

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;

/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

kcondvar_t lbolt_cv;

/*
 * Hybrid lbolt implementation:
 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and
 * the original symbols removed from the system. The once clock driven
 * variables are now implemented in an event driven fashion, backed by
 * gethrtime() coarsened to the appropriate clock resolution. The default
 * event driven implementation is complemented by a cyclic driven one, active
 * only during periods of intense activity around the DDI lbolt routines,
 * when a lbolt specific cyclic is reprogrammed to fire at a clock tick
 * interval to serve consumers of lbolt who rely on the original low cost
 * of consulting a memory position.
 *
 * The implementation uses the number of calls to these routines and the
 * frequency of these to determine when to transition from event to cyclic
 * driven and vice-versa. These values are kept on a per CPU basis for
 * scalability reasons and to prevent CPUs from constantly invalidating a
 * single cache line when modifying a global variable. The transition from
 * event to cyclic mode happens once the thresholds are crossed, and activity
 * on any CPU can cause such transition.
 *
 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 * lbolt_cyclic_driven() according to the current mode. When the thresholds
 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 * fire at a nsec_per_tick interval and increment an internal variable at
 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of such variable. lbolt_cyclic() will attempt
 * to shut itself off at each threshold interval (sampling period for calls
 * to the DDI lbolt routines), and return to the event driven mode, but will
 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't
 * wait for the cyclic subsystem to be initialized.
 */
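
/*
 * Illustrative sketch (not part of this file's interfaces): a DDI consumer
 * only ever sees the indirection described above, since ddi_get_lbolt()
 * reduces to (*lbolt_hybrid)().  Hypothetical caller, where done/do_work()
 * are stand-ins:
 *
 *	clock_t deadline = ddi_get_lbolt() + drv_usectohz(MICROSEC);
 *
 *	while (!done) {
 *		if (ddi_get_lbolt() >= deadline)
 *			break;			-- roughly 1 s elapsed
 *		do_work();
 *	}
 *
 * Tight polling like the loop above is exactly the workload that pushes the
 * per-CPU call counters over their thresholds and flips lbolt_hybrid from
 * lbolt_event_driven() to lbolt_cyclic_driven().
 */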

int64_t lbolt_bootstrap(void);
int64_t lbolt_event_driven(void);
int64_t lbolt_cyclic_driven(void);
int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);

/*
 * lbolt's cyclic, installed by clock_init().
 */
static void lbolt_cyclic(void);

/*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven, once it reaches cyclic mode.
 */
static boolean_t lbolt_cyc_only = B_FALSE;

/*
 * Cache aligned, per CPU structure with lbolt usage statistics.
 */
static lbolt_cpu_t *lb_cpu;

/*
 * Single, cache aligned, structure with all the information required by
 * the lbolt implementation.
 */
lbolt_info_t *lb_info;

int	one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

extern void	clock_tick_schedule(int);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
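
/*
 * Worked example of the thresholds above, derived from the definitions:
 * with TOD_REF_FREQ equal to NANOSEC, the TOD clock is (roughly) declared
 * stalled when less than one TOD second passes per 1.5 s of hrtime
 * (TOD_STALL_THRESHOLD), and declared jumped when it deviates from hrtime
 * by more than 0.5 s (TOD_JUMP_THRESHOLD).  The filter is given
 * TOD_FILTER_SETTLE (4 * TOD_FILTER_N == 16) samples to settle before
 * faults are acted upon.
 */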
static enum tod_fault_type tod_faulted = TOD_NOFAULT;

static int tod_status_flag = 0;		/* used by tod_validate() */

static hrtime_t prev_set_tick = 0;	/* gethrtime() prior to tod_set() */
static time_t prev_set_tod = 0;		/* tv_sec value passed to tod_set() */

/* patchable via /etc/system */
int tod_validate_enable = 1;

/* Diagnose/Limit messages about delay(9F) called from interrupt context */
int delay_from_interrupt_diagnose = 0;
volatile uint32_t delay_from_interrupt_msg = 20;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

static int64_t deadman_counter = 0;

static void
clock(void)
{
	kthread_t	*t;
	uint_t	nrunnable;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	extern	void	set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int i;
	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */

	if (panicstr)
		return;

	/*
	 * Make sure that 'freemem' does not drift too far from the truth.
	 */
	set_freemem();

	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 */
	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */
	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec) {
		loadavg_update();
		deadman_counter++;
	}

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */
	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);

	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Update user, system, and idle cpu times.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}

		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	clock_tick_schedule(one_sec);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wakeup the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will insure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 *   - if dosynctodr is 1, then compute the drift between
		 *	the tod chip and software time and adjust one or
		 *	the other depending on the circumstances
		 *
		 *   - if dosynctodr is 0, then the tod chip is independent
		 *	of the software clock and should not be adjusted,
		 *	but allowed to free run.  this allows NTP to sync.
		 *	hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
					callout_hrestime();

				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = now;
					s = hr_clock_lock();
					timedelta = (int64_t)drift*NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
			/* Update ani_free */
			set_anoninfo();
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		vminfo.updates++;
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;
		sysinfo.updates++;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API. We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large. If it is, we put the largest value
			 * that we can use into avenrun[i]. This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[]
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);
	}
}
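
/*
 * Reader's note on the avenrun[] encoding maintained above (a sketch, not
 * an interface contract): avenrun[i] is a fixed-point run-queue length with
 * FSHIFT fractional bits, so a consumer recovers the conventional load
 * average by dividing out FSCALE (== 1 << FSHIFT):
 *
 *	int32_t raw = avenrun[0];
 *	double one_minute_load = (double)raw / FSCALE;
 *
 * The 0x7fffffff clamp above simply saturates the value whenever the
 * 64-bit hp_avenrun[i] would overflow the signed 32-bit API.
 */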

void
clock_init(void)
{
	cyc_handler_t clk_hdlr, lbolt_hdlr;
	cyc_time_t clk_when, lbolt_when;
	int i, sz;
	intptr_t buf;

	/*
	 * Setup handler and timer for the clock cyclic.
	 */
	clk_hdlr.cyh_func = (cyc_func_t)clock;
	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
	clk_hdlr.cyh_arg = NULL;

	clk_when.cyt_when = 0;
	clk_when.cyt_interval = nsec_per_tick;

	/*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
	 * interval to satisfy performance needs of the DDI lbolt consumers.
	 * It is off by default.
	 */
	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
	lbolt_hdlr.cyh_arg = NULL;

	lbolt_when.cyt_interval = nsec_per_tick;

	/*
	 * Allocate cache line aligned space for the per CPU lbolt data and
	 * lbolt info structures, and initialize them with their default
	 * values. Note that these structures are also cache line sized.
	 */
	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    hz/HZ_DEFAULT;
	else
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;

	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;

	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	for (i = 0; i < max_ncpus; i++)
		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;

	/*
	 * Install the softint used to switch between event and cyclic driven
	 * lbolt. We use a soft interrupt to make sure the context of the
	 * cyclic reprogram call is safe.
	 */
	lbolt_softint_add();

	/*
	 * Since the hybrid lbolt implementation is based on a hardware counter
	 * that is reset at every hardware reboot and that we'd like to have
	 * the lbolt value starting at zero after both a hardware and a fast
	 * reboot, we calculate the number of clock ticks the system's been up
	 * and store it in the lbi_debug_time field of the lbolt info structure.
	 * The value of this field will be subtracted from lbolt before
	 * returning it.
	 */
	lb_info->lbi_internal = lb_info->lbi_debug_time =
	    (gethrtime()/nsec_per_tick);

	/*
	 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
	 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed. Setting lbolt_hybrid
	 * to either lbolt_{cyclic,event}_driven here signals those code paths
	 * that the lbolt related structures can be used.
	 */
	if (lbolt_cyc_only) {
		lbolt_when.cyt_when = 0;
		lbolt_hybrid = lbolt_cyclic_driven;
	} else {
		lbolt_when.cyt_when = CY_INFINITY;
		lbolt_hybrid = lbolt_event_driven;
	}

	/*
	 * Grab cpu_lock and install both cyclics.
	 */
	mutex_enter(&cpu_lock);

	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);

	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}
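
/*
 * Scale note for genloadavg(), derived from the code above: each lg_loads[]
 * sample holds scaled-hrtime nanoseconds of user + system + wait-run-queue
 * time accumulated over a one second interval, so one CPU kept fully busy
 * contributes NANOSEC per sample.  The final division by
 * (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX) therefore maps one sustained
 * runnable thread over the 10-second window to an average of
 * LGRP_LOADAVG_IN_THREAD_MAX.
 */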

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

	/*
	 * Third pass totals up per-zone statistics.
	 */
	zone_loadavg_update();
}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
 * routine computes new time and frequency offset estimates for each
 * call.  The PPS signal itself determines the new time offset,
 * instead of the calling argument.  Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP). If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *	 usec_per_tick. This is called from the serial driver cdintr(),
 *	 or equivalent, at a high PIL. Because the kernel keeps a
 *	 high-resolution time, the following code can accept either
 *	 the traditional argument pair, or the current highres timestamp
 *	 in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}
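
/*
 * Median-of-three walkthrough for the filters above (derived from the
 * branch structure, shown for the time filter): with samples
 * pps_tf[] = {8, 3, 5}, newest first, the "0 2 1" arm is taken, selecting
 * pps_offset = pps_tf[2] = 5 (the median) and jitter
 * v_usec = pps_tf[0] - pps_tf[1] = 5, the spread between the two outer
 * samples.  The frequency filter pps_ff[] is processed identically.
 */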

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t, int pending)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t ticks;
	int	poke = 0;		/* notify another CPU */
	int	user_mode;
	size_t	 rss;
	int i, total_usec, usec;
	rctl_qty_t secs;

	ASSERT(pending > 0);

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	for (i = 0; i < pending; i++) {
		CL_TICK(t);	/* Class specific tick processing */
		DTRACE_SCHED1(tick, kthread_t *, t);
	}

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	ticks = (pp->p_utime + pp->p_stime) % hz;
	/*
	 * Update process times. Should use high res clock and state
	 * changes instead of statistical sampling method. XXX
	 */
	if (user_mode) {
		pp->p_utime += pending;
	} else {
		pp->p_stime += pending;
	}

	pp->p_ttime += pending;
	as = pp->p_as;

	/*
	 * Update user profiling statistics. Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer. The value passed to itimerdecr() has to be
	 * in microseconds and has to be less than one second. Hence
	 * this loop.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (user_mode &&
		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGVTALRM);
		}
		total_usec -= usec;
	}

	/*
	 * If CPU was in user state, process lwp-profile
	 * interval timer.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGPROF);
		}
		total_usec -= usec;
	}

	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 *
	 * Perform the check only if we have accumulated more than a second.
	 */
	if ((ticks + pending) >= hz) {
		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
	}

	/*
	 *   (b) task.max-cpu-time resource control
	 *
	 * If we have accumulated enough ticks, increment the task CPU
	 * time usage and test for the resource limit. This minimizes the
	 * number of calls to the rctl_test(). The task CPU time mutex
	 * is highly contentious as many processes can be sharing a task.
	 */
	if (pp->p_ttime >= clock_tick_proc_max) {
		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
		pp->p_ttime = 0;
		if (secs) {
			(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
			    pp, secs, RCA_UNSAFE_SIGINFO);
		}
	}

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (atomic_cas_32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}
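
/*
 * Reading aid for the old-style scaling in profil_tick() above: the two-term
 * byteoff expression is (delta * pr_scale) >> 16 split into high and low
 * halves so the multiplication cannot overflow a native word.  pr_scale
 * follows profil(2) semantics - a 16-bit binary fraction where 0x10000 maps
 * each text address byte to one histogram byte and 0x8000 maps two text
 * bytes to one.
 */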

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

/*
 * The delay(9F) man page indicates that it can only be called from user or
 * kernel context - detect and diagnose bad calls. The following macro will
 * produce a limited number of messages identifying bad callers.  This is done
 * in a macro so that caller() is meaningful. When a bad caller is identified,
 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
 */
#define	DELAY_CONTEXT_CHECK()	{					\
	uint32_t	m;						\
	char		*f;						\
	ulong_t		off;						\
									\
	m = delay_from_interrupt_msg;					\
	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
	    !panicstr && !devinfo_freeze &&				\
	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
		f = modgetsymname((uintptr_t)caller(), &off);		\
		cmn_err(CE_WARN, "delay(9F) called from "		\
		    "interrupt context: %s`%s",				\
		    mod_containing_pc(caller()), f ? f : "...");	\
	}								\
}

/*
 * delay_common: common delay code.
 */
static void
delay_common(clock_t ticks)
{
	kthread_t	*t = curthread;
	clock_t		deadline;
	clock_t		timeleft;
	callout_id_t	id;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert delay(9F) call into drv_usecwait(9F) call. */
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	deadline = ddi_get_lbolt() + ticks;
	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout_default(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout_default(id, 0);
	}
}

/*
 * Delay specified number of clock ticks.
 */
void
delay(clock_t ticks)
{
	DELAY_CONTEXT_CHECK();

	delay_common(ticks);
}
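
/*
 * Hypothetical usage sketch (not from this file): callers express the delay
 * in ticks, typically converting from microseconds first:
 *
 *	delay(drv_usectohz(10000));	-- block for ~10 ms worth of ticks
 *
 * Calling this from interrupt context is invalid; DELAY_CONTEXT_CHECK()
 * above flags such callers, for whom busy-waiting with drv_usecwait(9F) is
 * the usual substitute.
 */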

/*
 * Delay a random number of clock ticks between 1 and ticks.
 */
void
delay_random(clock_t ticks)
{
	int	r;

	DELAY_CONTEXT_CHECK();

	(void) random_get_pseudo_bytes((void *)&r, sizeof (r));
	if (ticks == 0)
		ticks = 1;
	ticks = (r % ticks) + 1;
	delay_common(ticks);
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	kthread_t	*t = curthread;
	clock_t		deadline;
	clock_t		rc;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return (0);
	}

	deadline = ddi_get_lbolt() + ticks;
	mutex_enter(&t->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&t->t_delay_cv,
		    &t->t_delay_lock, deadline);
		/* loop until past deadline or signaled */
	} while (rc > 0);
	mutex_exit(&t->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}

static void
ddi_sleep_common(hrtime_t delay, hrtime_t resolution)
{
	kthread_t	*t = curthread;
	hrtime_t	deadline;
	callout_id_t	id;
	hrtime_t	tmp;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert ddi_*sleep(9F) call into drv_usecwait(9F) call. */
		if (NSEC2USEC(delay) > 0)
			drv_usecwait(NSEC2USEC(delay));
		return;
	}

	/*
	 * TODO: does this need to be in a loop checking that we didn't get
	 * woken up too early?
	 */
	mutex_enter(&t->t_delay_lock);
	tmp = gethrtime();
	id = timeout_generic(CALLOUT_NORMAL, delay_wakeup, t, delay,
	    resolution, CALLOUT_FLAG_ROUNDUP);
	cv_wait(&t->t_delay_cv, &t->t_delay_lock);
	mutex_exit(&t->t_delay_lock);
	(void) untimeout_generic(id, 0);
	if (gethrtime() - tmp < delay)
		cmn_err(CE_WARN, "%s returned too soon (wanted %llu, got %llu)",
		    __func__, delay, gethrtime() - tmp);
}

void
ddi_sleep(clock_t secs)
{
	hrtime_t res;

	/*
	 * We don't want to use 1 s resolution unconditionally because of
	 * how it is used for rounding up the deadline.  With 1 s
	 * resolution, a sleep of 1 second can take anywhere from 1 to
	 * 1.999999999 seconds on an idle system.  This seems unacceptable,
	 * and so we use either 100 ms or 10% of the sleep interval as the
	 * resolution - whichever is smaller.
	 *
	 * (There is a similar issue with the milli- and micro- sleep
	 * functions, but somehow an extra 1 ms or 1 us doesn't seem as bad.)
	 */
	if (secs > 0)
		res = MIN(100000000 /* 100 ms */, SEC2NSEC(secs) / 10);
	else
		res = 100000000; /* 100 ms */

	ddi_sleep_common(SEC2NSEC(secs), res);
}
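
/*
 * Worked example of the resolution choice above: ddi_sleep(1) gets
 * MIN(100 ms, SEC2NSEC(1) / 10) == 100 ms, bounding the wakeup to
 * [1.0 s, 1.1 s); ddi_sleep(60) gets MIN(100 ms, 6 s) == 100 ms as well,
 * so the worst-case overshoot stays at 100 ms regardless of the requested
 * duration.
 */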

void
ddi_msleep(clock_t msecs)
{
	ddi_sleep_common(MSEC2NSEC(msecs), 1000000 /* 1 ms */);
}

void
ddi_usleep(clock_t usecs)
{
	ddi_sleep_common(USEC2NSEC(usecs), 1000 /* 1 us */);
}

#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, set it to the
		 * greater of i) approx and ii) 1987. That way if approx
		 * is negative or is earlier than 1987, we set the clock
		 * back to a time when Oliver North, ALF and Dire Straits
		 * were all on the collective brain: 1987.
		 */
		timestruc_t tmp;
		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl high
		 * to avoid getting preempted between the tod_set and tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}

int timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();

	hrestime = *ts;
	membar_enter();	/* hrestime must be visible before timechanged++ */
	timedelta = 0;
	timechanged++;
	hr_clock_unlock(spl);
	callout_hrestime();
}
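
#if 0
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * consumer could poll timechanged to notice that the system time was
 * reset.  The membar_enter() above guarantees that a reader observing the
 * new timechanged value also observes the new hrestime.  The function and
 * its caching scheme are invented for illustration only.
 */
static void
mydrv_check_time_reset(int *last_timechanged)
{
	if (*last_timechanged != timechanged) {
		*last_timechanged = timechanged;
		/* system time was reset; revalidate any cached timestamps */
	}
}
#endif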

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (deadman_counter != CPU->cpu_deadman_counter) {
		CPU->cpu_deadman_counter = deadman_counter;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (--CPU->cpu_deadman_countdown > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_inc_32(&deadman_panics);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}
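
/*
 * Illustrative note (not part of the original source): deadman_counter is
 * advanced by the regular clock thread, while this cyclic fires once per
 * second at CY_HIGH_LEVEL on every CPU.  If the counter stops moving, each
 * CPU's countdown decrements from deadman_seconds toward zero, so a hung
 * clock panics the system after roughly deadman_seconds seconds (when
 * deadman_enabled is set).
 */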

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_counter = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}
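
/*
 * Illustrative note (not part of the original source): assuming NCPU is
 * 1024 on this platform, the staggering above makes CPU 0 first fire at
 * 0 ns, CPU 1 at ~0.98 ms (1 * NANOSEC / 1024), CPU 2 at ~1.95 ms, and so
 * on; every CPU then re-fires at one-second intervals
 * (cyt_interval = NANOSEC).
 */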

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}

/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
 *     a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

/*
 * Two functions that allow tod_status_flag to be manipulated by functions
 * external to this file.
 */

void
tod_status_set(int tod_flag)
{
	tod_status_flag |= tod_flag;
}

void
tod_status_clear(int tod_flag)
{
	tod_status_flag &= ~tod_flag;
}

/*
 * Record a timestamp and the value passed to tod_set().  The next call to
 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
 * when checking the timestruc_t returned by tod_get().  Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task but these
 * become obsolete, and will be re-assigned with the prev_set_* values,
 * in the case when the TOD is re-written.
 */
void
tod_set_prev(timestruc_t ts)
{
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return;
	}
	prev_set_tick = gethrtime();
	/*
	 * A negative value will be set to zero in utc_to_tod() so we fake
	 * a zero here in such a case.  This would need to change if the
	 * behavior of utc_to_tod() changes.
	 */
	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
}

/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	int cpr_resume_done = 0;
	int dr_resume_done = 0;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * If this is the first time through, we just need to save the tod
	 * we were called with and hrtime so we can use them next time to
	 * validate tod_get().
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * Handle any flags that have been turned on by tod_status_set().
	 * In the case where a tod_set() is done and then a subsequent
	 * tod_get() fails (ie, both TOD_SET_DONE and TOD_GET_FAILED are
	 * true), we treat the TOD_GET_FAILED with precedence by switching
	 * off the flag, returning tod and leaving TOD_SET_DONE asserted
	 * until such time as tod_get() completes successfully.
	 */
	if (tod_status_flag & TOD_GET_FAILED) {
		/*
		 * tod_get() has encountered an issue, possibly transitory,
		 * when reading TOD.  We'll just return the incoming tod
		 * value (which is actually hrestime.tv_sec in this case)
		 * and when we get a genuine tod, following a successful
		 * tod_get(), we can validate using prev_tod and prev_tick.
		 */
		tod_status_flag &= ~TOD_GET_FAILED;
		return (tod);
	} else if (tod_status_flag & TOD_SET_DONE) {
		/*
		 * TOD has been modified.  Just before the TOD was written,
		 * tod_set_prev() saved tod and hrtime; we can now use
		 * those values, prev_set_tod and prev_set_tick, to validate
		 * the incoming tod that's just been read.
		 */
		prev_tod = prev_set_tod;
		prev_tick = prev_set_tick;
		dtick_avg = TOD_REF_FREQ;
		tod_status_flag &= ~TOD_SET_DONE;
		/*
		 * If a tod_set() preceded a cpr_suspend() without an
		 * intervening tod_validate(), we need to ensure that a
		 * TOD_JUMPED condition is ignored.
		 * Note this isn't a concern in the case of DR as we've
		 * just reassigned dtick_avg, above.
		 */
		if (tod_status_flag & TOD_CPR_RESUME_DONE) {
			cpr_resume_done = 1;
			tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
		/*
		 * The system's coming back from a checkpoint resume.
		 */
		cpr_resume_done = 1;
		tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		/*
		 * We need to handle the possibility of a CPR suspend
		 * operation having been initiated whilst a DR event was
		 * in-flight.
		 */
		if (tod_status_flag & TOD_DR_RESUME_DONE) {
			dr_resume_done = 1;
			tod_status_flag &= ~TOD_DR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_DR_RESUME_DONE) {
		/*
		 * A Dynamic Reconfiguration event has taken place.
		 */
		dr_resume_done = 1;
		tod_status_flag &= ~TOD_DR_RESUME_DONE;
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick
			 * so that diff_tick is calculated since
			 * the first diff_tod == 0
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as
		 * variation from reference freq in quartiles
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error.  The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/*
				 * If we've just done a CPR resume, we detect
				 * a jump in the TOD but, actually, what's
				 * happened is that the TOD has been increasing
				 * whilst the system was suspended and the tick
				 * count hasn't kept up.  We consider the first
				 * occurrence of this after a resume as normal
				 * and ignore it; otherwise, in a non-resume
				 * case, we regard it as a TOD problem.
				 */
				if (!cpr_resume_done) {
					/* ERROR - tod jumped */
					tod_bad = TOD_JUMPED;
					off = (int)diff_tod;
				}
			}
			if (dtick_delta) {
				/*
				 * If we've just done a DR resume, dtick_avg
				 * can go a bit askew so we reset it and carry
				 * on; otherwise, the TOD is in error.
				 */
				if (dr_resume_done) {
					dtick_avg = TOD_REF_FREQ;
				} else {
					/* ERROR - change in clock rate */
					tod_bad = TOD_RATECHANGED;
				}
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr since we are going to fault
		 * the TOD chip anyway here
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
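
/*
 * Illustrative note (not part of the original source): assuming
 * TOD_REF_FREQ is the hrtime rate of NANOSEC (10^9) per TOD second, a
 * healthy clock yields dtick = diff_tick / diff_tod ~= 10^9.  The quartile
 * computation then makes dtick_delta nonzero only once dtick_avg has
 * drifted at least 25% from the reference; e.g. dtick_avg = 1.3 * 10^9
 * gives (0.3 * 10^9) / (0.25 * 10^9) = 1, flagging TOD_RATECHANGED.
 */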

static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
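
#if 0
/*
 * Illustrative sketch (not part of the original source): the fixed-point
 * update above approximates this floating-point exponential moving
 * average, where decay[i] = 1 - exp(-1/T) for T = 60, 300, 900 seconds.
 * Note hp_ave carries additional fixed-point scaling, so the values are
 * not directly comparable; the sketch shows the decay structure only.
 */
static void
calcloadavg_float(double nrun, double *ave)
{
	static const double decay[3] = {
		0.016528,	/* 1 - exp(-1.0/60) */
		0.003328,	/* 1 - exp(-1.0/300) */
		0.001110,	/* 1 - exp(-1.0/900) */
	};
	int i;

	for (i = 0; i < 3; i++)
		ave[i] += (nrun - ave[i]) * decay[i];
}
#endif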

/*
 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
 * calculate the value of lbolt according to the current mode.  In the event
 * driven mode (the default), lbolt is calculated by dividing the current hires
 * time by the number of nanoseconds per clock tick.  In the cyclic driven mode
 * an internal variable is incremented at each firing of the lbolt cyclic
 * and returned by lbolt_cyclic_driven().
 *
 * The system will transition from event to cyclic driven mode when the number
 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
 * window of time.  It does so by reprogramming lbolt_cyclic from CY_INFINITY
 * to nsec_per_tick.  The lbolt cyclic will remain ON while at least one CPU is
 * causing enough activity to cross the thresholds.
 */
int64_t
lbolt_bootstrap(void)
{
	return (0);
}
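
#if 0
/*
 * Illustrative sketch (not part of this excerpt): the DDI entry points
 * reduce to a call through the lbolt_hybrid function pointer, roughly:
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}
#endif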

/* ARGSUSED */
uint_t
lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
{
	hrtime_t ts, exp;
	int ret;

	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);

	kpreempt_disable();

	ts = gethrtime();
	lb_info->lbi_internal = (ts/nsec_per_tick);

	/*
	 * Align the next expiration to a clock tick boundary.
	 */
	exp = ts + nsec_per_tick - 1;
	exp = (exp/nsec_per_tick) * nsec_per_tick;

	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
	ASSERT(ret);

	lbolt_hybrid = lbolt_cyclic_driven;
	lb_info->lbi_cyc_deactivate = B_FALSE;
	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;

	kpreempt_enable();

	ret = atomic_dec_32_nv(&lb_info->lbi_token);
	ASSERT(ret == 0);

	return (1);
}
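
/*
 * Illustrative note (not part of the original source): assuming a 100 Hz
 * clock (nsec_per_tick = 10,000,000), a timestamp ts = 123,456,789 ns is
 * rounded up as exp = ((123,456,789 + 9,999,999) / 10^7) * 10^7
 * = 130,000,000 ns, i.e. the start of the next full tick.
 */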

int64_t
lbolt_event_driven(void)
{
	hrtime_t ts;
	int64_t lb;
	int ret, cpu = CPU->cpu_seqid;

	ts = gethrtime();
	ASSERT(ts > 0);

	ASSERT(nsec_per_tick > 0);
	lb = (ts/nsec_per_tick);

	/*
	 * Switch to cyclic mode if the number of calls to this routine
	 * has reached the threshold within the interval.
	 */
	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {

		if (--lb_cpu[cpu].lbc_counter == 0) {
			/*
			 * Reached the threshold within the interval, reset
			 * the usage statistics.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;

			/*
			 * Make sure only one thread reprograms the
			 * lbolt cyclic and changes the mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_cyclic_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
				} else {
					lbolt_softint_post();
				}
			}
		}
	} else {
		/*
		 * Exceeded the interval, reset the usage statistics.
		 */
		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
		lb_cpu[cpu].lbc_cnt_start = lb;
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}

int64_t
lbolt_cyclic_driven(void)
{
	int64_t lb = lb_info->lbi_internal;
	int cpu;

	/*
	 * If a CPU has already prevented the lbolt cyclic from deactivating
	 * itself, don't bother tracking the usage.  Otherwise check if we're
	 * within the interval and how the per CPU counter is doing.
	 */
	if (lb_info->lbi_cyc_deactivate) {
		cpu = CPU->cpu_seqid;
		if ((lb - lb_cpu[cpu].lbc_cnt_start) <
		    lb_info->lbi_thresh_interval) {

			if (lb_cpu[cpu].lbc_counter == 0)
				/*
				 * Reached the threshold within the interval,
				 * prevent the lbolt cyclic from turning itself
				 * off.
				 */
				lb_info->lbi_cyc_deactivate = B_FALSE;
			else
				lb_cpu[cpu].lbc_counter--;
		} else {
			/*
			 * Only reset the usage statistics when we have
			 * exceeded the interval.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;
		}
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}

/*
 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
 * It is inactive by default, and will be activated when switching from event
 * to cyclic driven lbolt.  The cyclic will turn itself off unless signaled
 * by lbolt_cyclic_driven().
 */
static void
lbolt_cyclic(void)
{
	int ret;

	lb_info->lbi_internal++;

	if (!lbolt_cyc_only) {

		if (lb_info->lbi_cyc_deactivate) {
			/*
			 * Switching from cyclic to event driven mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_event_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
					return;
				}

				kpreempt_disable();

				lbolt_hybrid = lbolt_event_driven;
				ret = cyclic_reprogram(
				    lb_info->id.lbi_cyclic_id,
				    CY_INFINITY);
				ASSERT(ret);

				kpreempt_enable();

				ret = atomic_dec_32_nv(&lb_info->lbi_token);
				ASSERT(ret == 0);
			}
		}

		/*
		 * The lbolt cyclic should not try to deactivate itself before
		 * the sampling period has elapsed.
		 */
		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
		    lb_info->lbi_thresh_interval) {
			lb_info->lbi_cyc_deactivate = B_TRUE;
			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
		}
	}
}

/*
 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
 * when the system drops into the kernel debugger.  lbolt_debug_entry() is
 * called by the KDI system claim callbacks to record a hires timestamp at
 * debug enter time.  lbolt_debug_return() is called by the system release
 * callbacks to account for the time spent in the debugger.  The value is then
 * accumulated in the lb_info structure and used by lbolt_event_driven() and
 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
 */
void
lbolt_debug_entry(void)
{
	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		lb_info->lbi_debug_ts = gethrtime();
	}
}

/*
 * Calculate the time spent in the debugger and add it to the lbolt info
 * structure.  We also update the internal lbolt value in case we were in
 * cyclic driven mode going in.
 */
void
lbolt_debug_return(void)
{
	hrtime_t ts;

	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		ASSERT(nsec_per_tick > 0);

		ts = gethrtime();
		lb_info->lbi_internal = (ts/nsec_per_tick);
		lb_info->lbi_debug_time +=
		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);

		lb_info->lbi_debug_ts = 0;
	}
}
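
/*
 * Illustrative note (not part of the original source): assuming a 100 Hz
 * clock (nsec_per_tick = 10,000,000), spending 3 s in the debugger adds
 * roughly 300 ticks to lbi_debug_time.  Since lbolt_event_driven() and
 * lbolt_cyclic_driven() both return (lb - lbi_debug_time), the lbolt value
 * observed by callers stays continuous across the debugger session.
 */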