/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2012 Joyent, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/zone.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * state transitions are accomplished by calling new_mstate() to switch between
 * states.  Transitions from a sleeping state (LMS_SLEEP and LMS_STOPPED) occur
 * by calling restore_mstate() which restores a thread to its previously running
 * state.  This code is primarily executed by the dispatcher in disp() before
 * running a process that was put to sleep.  If the thread was not in a sleeping
 * state, this call has little effect other than to update the count of time the
 * thread has spent waiting on run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for threads
 * but it tracks user, system, and idle time for cpus.  Cpu microstate
 * accounting does not track interrupt times as there is a pre-existing
 * interrupt accounting mechanism for this purpose.  Cpu microstate accounting
 * tracks time that user threads have spent active, idle, or in the system on a
 * given cpu.  Cpu microstate accounting has fewer states which allows it to
 * have better defined transitions.  The states transition in the following
 * order:
 *
 *	CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go through
 * the system state, and vice-versa for the user state from idle.  The switching
 * of the microstates from user to system is done as part of the regular thread
 * microstate accounting code, except for the idle state which is switched by
 * the dispatcher before it runs the idle loop.
 *
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages).  The routines which handle
 * the growing/shrinking and exponentiation of cpu percentages have been moved
 * here as it now makes more sense for them to be generated from the microstate
 * code.  Cpu percentages are generated similarly to the way they were before;
 * however, now they are based upon high-resolution timestamps and the
 * timestamps are modified at various state changes instead of during a clock()
 * interrupt.  This allows us to generate more accurate cpu percentages which
 * are also in-sync with microstate data.
 */

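/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): how the entry points described above are typically driven.  The
 * call sites shown are assumptions for illustration; the real callers live
 * in the syscall trap handlers and the dispatcher.
 *
 *	syscall entry:		syscall_mstate(LMS_USER, LMS_SYSTEM);
 *	syscall return:		syscall_mstate(LMS_SYSTEM, LMS_USER);
 *	going to sleep:		(void) new_mstate(curthread, LMS_SLEEP);
 *	in disp(), before running a newly selected thread:
 *				restore_mstate(t);
 */
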
/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(kthread_t *t, int init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu
 */
void
init_cpu_mstate(cpu_t *cpu, int init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * sets cpu state to OFFLINE.  We don't actually track this time,
 * but it serves as a useful placeholder state for when we're not
 * doing anything.
 */
void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state) \
	gen = cpu->cpu_mstate_gen; \
	cpu->cpu_mstate_gen = 0; \
	/* Need membar_producer() here if stores not ordered / TSO */ \
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state; \
	cpu->cpu_mstate_start = curtime; \
	/* Need membar_producer() here if stores not ordered / TSO */ \
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function cannot be re-entrant on a given CPU.  As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked.  Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */
	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor.  i.e. no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU.  Any locking added to this code path
	 * would seriously impact syscall performance.  So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time.  This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done.  Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state.  The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical.  cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start.  Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one puts membar_producer() calls to accomplish
	 * this.  Unfortunately this routine is extremely performance
	 * critical (esp. in syscall_mstate below) and we cannot
	 * afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls.  On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem.  The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */
	NEW_CPU_MSTATE(cmstate);
}

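/*
 * Reader-side sketch (editorial illustration, not part of the original file):
 * one way a consumer of cpu_mstate could apply the generation protocol
 * described above.  The loop structure and local names here are assumptions;
 * only cpu_mstate_gen, cpu_mstate, cpu_mstate_start and membar_consumer()
 * come from the comments in this file.
 *
 *	uint16_t gen;
 *	int state;
 *	hrtime_t start;
 *
 *	do {
 *		gen = cpu->cpu_mstate_gen;	// snapshot the generation
 *		membar_consumer();		// order the loads below
 *		state = cpu->cpu_mstate;
 *		start = cpu->cpu_mstate_start;
 *		membar_consumer();		// order the re-check
 *	} while (gen == 0 || gen != cpu->cpu_mstate_gen);
 *
 * A zero or changed generation means the writer was mid-update, so the
 * reader simply retries; the writer never spins or blocks.
 */
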
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t waitrq;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
		/* if waitrq is zero, count all of the time. */
		if (waitrq == 0)
			waitrq = now;

		if (waitrq > state_start) {
			aggr_time += waitrq - state_start;
		}
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}

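/*
 * Usage sketch (editorial illustration, an assumption rather than a quote of
 * any real caller): the thread must be locked across the call, per the
 * THREAD_LOCK_HELD() assertion above; onproc_ns is a hypothetical local.
 *
 *	thread_lock(t);
 *	onproc_ns = mstate_thread_onproc_time(t);
 *	thread_unlock(t);
 */
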
/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racey interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
	struct mstate	*const	ms = &ttolwp(t)->lwp_mstate;

	int		mstate;
	hrtime_t	now;
	hrtime_t	state_start;
	hrtime_t	waitrq;
	hrtime_t	aggr_onp;
	hrtime_t	aggr_run;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_procp->p_flag & SSYS);
	ASSERT(ttolwp(t) != NULL);

	/* shouldn't be any non-SYSTEM on-CPU time */
	ASSERT(ms->ms_acct[LMS_USER] == 0);
	ASSERT(ms->ms_acct[LMS_TRAP] == 0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	state_start = ms->ms_state_start;

	aggr_onp = ms->ms_acct[LMS_SYSTEM];
	aggr_run = ms->ms_acct[LMS_WAIT_CPU];

	now = gethrtime_unscaled();

	/* if waitrq == 0, then there is no time to account to TS_RUN */
	if (waitrq == 0)
		waitrq = now;

	/* If there is system time to accumulate, do so */
	if (mstate == LMS_SYSTEM && state_start < waitrq)
		aggr_onp += waitrq - state_start;

	if (waitrq < now)
		aggr_run += now - waitrq;

	scalehrtime(&aggr_onp);
	scalehrtime(&aggr_run);

	*onproc = aggr_onp;
	*runnable = aggr_run;
}

/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}

void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	zone_t *z = ttozone(t);
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	klwp_t *lwp;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	if (fromms == LMS_USER)
		atomic_add_64(&z->zone_utime, newtime);
	else if (fromms == LMS_SYSTEM)
		atomic_add_64(&z->zone_stime, newtime);
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE

/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */

static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint_t ui;

	expx_cnt++;

	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK.  Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
		expx_mul += i;	/* seldom happens */
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}

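/*
 * Worked example (editorial illustration, based on the reconstruction above):
 * with LSHIFT = 31, LSI_ONE represents 1.0.
 *
 *	exp_x(0):	ull = 0, so no halving and no squaring occur;
 *			the result is LSI_ONE - 0 = LSI_ONE, i.e. exp(0) = 1.
 *
 *	exp_x(LSI_ONE)	(x = 1.0): ull = 4, so x is halved three times to
 *			0.125, approximated as 1 - 0.125 = 0.875, then squared
 *			three times: 0.875^8 ~= 0.344 versus the true
 *			exp(-1) ~= 0.368, an absolute error of about 0.024,
 *			within the 4% bound stated above.
 */
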
/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu:  pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu:  1 - ( 1 - pct ) * exp(-tau)
 */
uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}

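/*
 * Note (editorial, derived from the two routines above rather than from any
 * original comment): because LSI_ONE represents 1.0, cpu_grow() is simply the
 * complement of cpu_decay(); a percentage grows toward one at the same
 * exponential rate at which it decays toward zero.  For example,
 *
 *	cpu_grow(0, nsec) == LSI_ONE - exp_x(nsec / cpu_decay_factor)
 */
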
/*
 * Defined to determine whether an lwp is still on a processor.
 */
#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

void
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so this *has* to be
	 * done atomically.  Holding locks here causes bad things to happen.
	 */
	do {
		pctcpu = t->t_pctcpu;
		hrlb = t->t_hrtime;
		delta = newtime - hrlb;
		if (delta < 0) {
			newtime = gethrtime_unscaled();
			delta = newtime - hrlb;
		}
		t->t_hrtime = newtime;

		if (T_ONPROC(t) && t->t_waitrq == 0) {
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (atomic_cas_32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	int state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	hrtime_t ztime;
	hrtime_t origstart;
	klwp_t *lwp;
	zone_t *z;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	/*
	 * Don't do microstate processing for threads without an lwp (kernel
	 * threads).  Also, if we're an interrupt thread that is pinning another
	 * thread, our t_mstate hasn't been initialized.  We'd be modifying the
	 * microstate of the underlying lwp which doesn't realize that it's
	 * pinned.  In this case, also don't change the microstate.
	 */
	if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	origstart = ms->ms_state_start;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		ztime = newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
	    oldtime);

	/*
	 * When the system boots the initial startup thread will have a
	 * ms_state_start of 0 which would add a huge system time to the global
	 * zone.  We want to skip aggregating that initial bit of work.
	 */
	if (origstart != 0) {
		z = ttozone(t);
		if (state == LMS_USER)
			atomic_add_64(&z->zone_utime, ztime);
		else if (state == LMS_SYSTEM)
			atomic_add_64(&z->zone_stime, ztime);
	}

	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */
	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}

/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;
	hrtime_t waittime;
	zone_t *z;

	/*
	 * Don't call restore_mstate() on threads without lwps (kernel
	 * threads).
	 *
	 * threads with t_intr set shouldn't be in the dispatcher, so assert
	 * that nobody here has t_intr.
	 */
	ASSERT(t->t_intr == NULL);

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		if (waitrq == 0)
			waitrq = curtime;
		t->t_waitrq = 0;
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
	    oldtime);

	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	z = ttozone(t);
	waittime = curtime - waitrq;
	ms->ms_acct[LMS_WAIT_CPU] += waittime;
	atomic_add_64(&z->zone_wtime, waittime);
	CPU->cpu_waitrq += waittime;
	ms->ms_state_start = curtime;
}

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt	+= lwp->lwp_ru.minflt;
	p->p_ru.majflt	+= lwp->lwp_ru.majflt;
	p->p_ru.nswap	+= lwp->lwp_ru.nswap;
	p->p_ru.inblock	+= lwp->lwp_ru.inblock;
	p->p_ru.oublock	+= lwp->lwp_ru.oublock;
	p->p_ru.msgsnd	+= lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv	+= lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw	+= lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw	+= lwp->lwp_ru.nivcsw;
	p->p_ru.sysc	+= lwp->lwp_ru.sysc;
	p->p_ru.ioch	+= lwp->lwp_ru.ioch;