/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/zone.h>
/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * state transitions are accomplished by calling new_mstate() to switch between
 * states.  Transitions from a sleeping state (LMS_SLEEP and LMS_STOPPED) occur
 * by calling restore_mstate() which restores a thread to its previously running
 * state.  This code is primarily executed by the dispatcher in disp() before
 * running a process that was put to sleep.  If the thread was not in a sleeping
 * state, this call has little effect other than to update the count of time the
 * thread has spent waiting on run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for threads
 * but it tracks user, system, and idle time for cpus.  Cpu microstate
 * accounting does not track interrupt times as there is a pre-existing
 * interrupt accounting mechanism for this purpose.  Cpu microstate accounting
 * tracks time that user threads have spent active, idle, or in the system on a
 * given cpu.  Cpu microstate accounting has fewer states which allows it to
 * have better defined transitions.  The states transition in the following
 * order:
 *
 *  CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go through
 * the system state, and vice-versa for the user state from idle.  The switching
 * of the microstates from user to system is done as part of the regular thread
 * microstate accounting code, except for the idle state which is switched by
 * the dispatcher before it runs the idle loop.
 *
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages).  The routines which handle
 * the growing/shrinking and exponentiation of cpu percentages have been moved
 * here as it now makes more sense for them to be generated from the microstate
 * code.  Cpu percentages are generated similarly to the way they were before;
 * however, now they are based upon high-resolution timestamps and the
 * timestamps are modified at various state changes instead of during a clock()
 * interrupt.  This allows us to generate more accurate cpu percentages which
 * are also in-sync with microstate data.
 */
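
/*
 * Illustrative sketch (hypothetical, not part of this file): the thread-side
 * calling pattern described in the block comment above, condensed into one
 * function.  A blocking thread is moved into LMS_SLEEP with new_mstate();
 * the dispatcher later calls restore_mstate() to return it to its previous
 * running state and account its run-queue wait time.  Callers must be
 * curthread or hold the thread lock; the function name is invented here.
 */
static void
example_sleep_then_run(kthread_t *t)
{
        /* going to sleep: charge subsequent time to LMS_SLEEP */
        (void) new_mstate(t, LMS_SLEEP);

        /* ... thread blocks, is woken, and waits on a run queue ... */

        /* in disp(), before the thread runs again */
        restore_mstate(t);
}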
/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(kthread_t *t, int init_state)
{
        struct mstate *ms;
        klwp_t *lwp;
        hrtime_t curtime;

        ASSERT(init_state != LMS_WAIT_CPU);
        ASSERT((unsigned)init_state < NMSTATES);

        if ((lwp = ttolwp(t)) != NULL) {
                ms = &lwp->lwp_mstate;
                curtime = gethrtime_unscaled();
                ms->ms_prev = LMS_SYSTEM;
                ms->ms_start = curtime;
                ms->ms_term = 0;
                ms->ms_state_start = curtime;
                t->t_mstate = init_state;
                t->t_waitrq = 0;
                t->t_hrtime = curtime;
                if ((t->t_proc_flag & TP_MSACCT) == 0)
                        t->t_proc_flag |= TP_MSACCT;
                bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
        }
}
/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu
 */
void
init_cpu_mstate(cpu_t *cpu, int init_state)
{
        ASSERT(init_state != CMS_DISABLED);

        cpu->cpu_mstate = init_state;
        cpu->cpu_mstate_start = gethrtime_unscaled();
        bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}
/*
 * sets cpu state to OFFLINE.  We don't actually track this time,
 * but it serves as a useful placeholder state for when we're not
 * doing anything.
 */
void
term_cpu_mstate(struct cpu *cpu)
{
        ASSERT(cpu->cpu_mstate != CMS_DISABLED);
        cpu->cpu_mstate = CMS_DISABLED;
        cpu->cpu_mstate_start = 0;
}
/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define NEW_CPU_MSTATE(state) \
        gen = cpu->cpu_mstate_gen; \
        cpu->cpu_mstate_gen = 0; \
        /* Need membar_producer() here if stores not ordered / TSO */ \
        cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
        cpu->cpu_mstate = state; \
        cpu->cpu_mstate_start = curtime; \
        /* Need membar_producer() here if stores not ordered / TSO */ \
        cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;
void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
        cpu_t *cpu = CPU;
        uint16_t gen;

        ASSERT(cpu->cpu_mstate != CMS_DISABLED);
        ASSERT(cmstate < NCMSTATES);
        ASSERT(cmstate != CMS_DISABLED);

        /*
         * This function cannot be re-entrant on a given CPU.  As such,
         * we ASSERT and panic if we are called on behalf of an interrupt.
         * The one exception is for an interrupt which has previously
         * blocked.  Such an interrupt is being scheduled by the dispatcher
         * just like a normal thread, and as such cannot arrive here
         * in a re-entrant manner.
         */
        ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
        ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

        /*
         * LOCKING, or lack thereof:
         *
         * Updates to CPU mstate can only be made by the CPU
         * itself, and the above check to ignore interrupts
         * should prevent recursion into this function on a given
         * processor. i.e. no possible write contention.
         *
         * However, reads of CPU mstate can occur at any time
         * from any CPU.  Any locking added to this code path
         * would seriously impact syscall performance.  So,
         * instead we have a best-effort protection for readers.
         * The reader will want to account for any time between
         * cpu_mstate_start and the present time.  This requires
         * some guarantees that the reader is getting coherent
         * information.
         *
         * We use a generation counter, which is set to 0 before
         * we start making changes, and is set to a new value
         * after we're done.  Someone reading the CPU mstate
         * should check for the same non-zero value of this
         * counter both before and after reading all state.  The
         * important point is that the reader is not a
         * performance-critical path, but this function is.
         *
         * The ordering of writes is critical.  cpu_mstate_gen must
         * be visibly zero on all CPUs before we change cpu_mstate
         * and cpu_mstate_start.  Additionally, cpu_mstate_gen must
         * not be restored to oldgen+1 until after all of the other
         * writes have become visible.
         *
         * Normally one puts membar_producer() calls to accomplish
         * this.  Unfortunately this routine is extremely performance
         * critical (esp. in syscall_mstate below) and we cannot
         * afford the additional time, particularly on some x86
         * architectures with extremely slow sfence calls.  On a
         * CPU which guarantees write ordering (including sparc, x86,
         * and amd64) this is not a problem.  The compiler could still
         * reorder the writes, so we make the four cpu fields
         * volatile to prevent this.
         *
         * TSO warning: should we port to a non-TSO (or equivalent)
         * CPU, this will break.
         *
         * The reader still needs the membar_consumer() calls because,
         * although the volatiles prevent the compiler from reordering
         * loads, the CPU can still do so.
         */

        NEW_CPU_MSTATE(cmstate);
}
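
/*
 * Illustrative sketch (hypothetical reader, not part of this file): consuming
 * the generation protocol described above.  The reader retries until it sees
 * the same non-zero cpu_mstate_gen before and after its loads, and uses
 * membar_consumer() (sys/atomic.h) to keep the CPU from reordering those
 * loads.  The function name and exact shape are invented for illustration.
 */
static void
example_read_cpu_mstate(cpu_t *cpu, hrtime_t times[NCMSTATES])
{
        uint16_t gen;
        int i;

        do {
                /* spin until the writer is not mid-update */
                while ((gen = cpu->cpu_mstate_gen) == 0)
                        ;
                membar_consumer();
                for (i = 0; i < NCMSTATES; i++)
                        times[i] = cpu->cpu_acct[i];
                /* credit the still-open interval to the current state */
                times[cpu->cpu_mstate] +=
                    gethrtime_unscaled() - cpu->cpu_mstate_start;
                membar_consumer();
        } while (gen != cpu->cpu_mstate_gen);
}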
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
        hrtime_t aggr_time;
        hrtime_t now;
        hrtime_t waitrq;
        hrtime_t state_start;
        struct mstate *ms;
        klwp_t *lwp;
        int mstate;

        ASSERT(THREAD_LOCK_HELD(t));

        if ((lwp = ttolwp(t)) == NULL)
                return (0);

        mstate = t->t_mstate;
        waitrq = t->t_waitrq;
        ms = &lwp->lwp_mstate;
        state_start = ms->ms_state_start;

        aggr_time = ms->ms_acct[LMS_USER] +
            ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

        now = gethrtime_unscaled();

        /*
         * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
         * inconsistent, so it is possible that now < state_start.
         */
        if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
                /* if waitrq is zero, count all of the time. */
                if (waitrq == 0) {
                        waitrq = now;
                }

                if (waitrq > state_start) {
                        aggr_time += waitrq - state_start;
                }
        }

        scalehrtime(&aggr_time);
        return (aggr_time);
}
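
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the lock
 * discipline for mstate_thread_onproc_time().  The result is user + system
 * (+ trap) time in scaled nanoseconds; the function name is invented here.
 */
static hrtime_t
example_thread_onproc(kthread_t *t)
{
        hrtime_t onproc;

        thread_lock(t);
        onproc = mstate_thread_onproc_time(t);
        thread_unlock(t);
        return (onproc);
}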
/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * during a previous call.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
        struct mstate *const ms = &ttolwp(t)->lwp_mstate;

        int mstate;
        hrtime_t now;
        hrtime_t state_start;
        hrtime_t waitrq;
        hrtime_t aggr_onp;
        hrtime_t aggr_run;

        ASSERT(THREAD_LOCK_HELD(t));
        ASSERT(t->t_procp->p_flag & SSYS);
        ASSERT(ttolwp(t) != NULL);

        /* shouldn't be any non-SYSTEM on-CPU time */
        ASSERT(ms->ms_acct[LMS_USER] == 0);
        ASSERT(ms->ms_acct[LMS_TRAP] == 0);

        mstate = t->t_mstate;
        waitrq = t->t_waitrq;
        state_start = ms->ms_state_start;

        aggr_onp = ms->ms_acct[LMS_SYSTEM];
        aggr_run = ms->ms_acct[LMS_WAIT_CPU];

        now = gethrtime_unscaled();

        /* if waitrq == 0, then there is no time to account to TS_RUN */
        if (waitrq == 0)
                waitrq = now;

        /* If there is system time to accumulate, do so */
        if (mstate == LMS_SYSTEM && state_start < waitrq)
                aggr_onp += waitrq - state_start;

        /* If there is runnable time to accumulate, do so */
        if (waitrq < now)
                aggr_run += now - waitrq;

        scalehrtime(&aggr_onp);
        scalehrtime(&aggr_run);

        *onproc = aggr_onp;
        *runnable = aggr_run;
}
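
/*
 * Illustrative sketch (hypothetical caller, not part of this file): sampling
 * a system thread's times under the thread lock.  Per the comment above, the
 * results may briefly regress between calls, so a consumer wanting monotonic
 * output must clamp against its previous sample itself.
 */
static void
example_systhread_sample(kthread_t *t, hrtime_t *onp, hrtime_t *run)
{
        thread_lock(t);
        mstate_systhread_times(t, onp, run);
        thread_unlock(t);
}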
/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
        struct mstate *ms;
        kthread_t *t;
        klwp_t *lwp;
        hrtime_t aggr_time;
        hrtime_t scaledtime;

        ASSERT(MUTEX_HELD(&p->p_lock));
        ASSERT((unsigned)a_state < NMSTATES);

        aggr_time = p->p_acct[a_state];
        if (a_state == LMS_SYSTEM)
                aggr_time += p->p_acct[LMS_TRAP];

        t = p->p_tlist;
        if (t == NULL)
                return (aggr_time);

        do {
                if (t->t_proc_flag & TP_LWPEXIT)
                        continue;

                lwp = ttolwp(t);
                ms = &lwp->lwp_mstate;
                scaledtime = ms->ms_acct[a_state];
                scalehrtime(&scaledtime);
                aggr_time += scaledtime;
                if (a_state == LMS_SYSTEM) {
                        scaledtime = ms->ms_acct[LMS_TRAP];
                        scalehrtime(&scaledtime);
                        aggr_time += scaledtime;
                }
        } while ((t = t->t_forw) != p->p_tlist);

        return (aggr_time);
}
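
/*
 * Illustrative sketch (hypothetical /proc-style caller, not part of this
 * file): aggregating system time for a whole process.  mstate_aggr_state()
 * does the per-lwp scalehrtime() conversion internally, so the result is
 * already in scaled nanoseconds; the function name is invented here.
 */
static hrtime_t
example_proc_system_time(proc_t *p)
{
        hrtime_t stime;

        mutex_enter(&p->p_lock);
        stime = mstate_aggr_state(p, LMS_SYSTEM);
        mutex_exit(&p->p_lock);
        return (stime);
}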
void
syscall_mstate(int fromms, int toms)
{
        kthread_t *t = curthread;
        zone_t *z = ttozone(t);
        struct mstate *ms;
        hrtime_t *mstimep;
        hrtime_t curtime;
        klwp_t *lwp;
        hrtime_t newtime;
        cpu_t *cpu;
        uint16_t gen;

        if ((lwp = ttolwp(t)) == NULL)
                return;

        ASSERT(fromms < NMSTATES);
        ASSERT(toms < NMSTATES);

        ms = &lwp->lwp_mstate;
        mstimep = &ms->ms_acct[fromms];
        curtime = gethrtime_unscaled();
        newtime = curtime - ms->ms_state_start;
        while (newtime < 0) {
                curtime = gethrtime_unscaled();
                newtime = curtime - ms->ms_state_start;
        }
        *mstimep += newtime;
        if (fromms == LMS_USER)
                atomic_add_64(&z->zone_utime, newtime);
        else if (fromms == LMS_SYSTEM)
                atomic_add_64(&z->zone_stime, newtime);
        ms->ms_state_start = curtime;
        ms->ms_prev = fromms;
        kpreempt_disable(); /* don't change CPU while changing CPU's state */
        cpu = CPU;
        ASSERT(cpu == t->t_cpu);
        if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
                NEW_CPU_MSTATE(CMS_SYSTEM);
        } else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
                NEW_CPU_MSTATE(CMS_USER);
        }
        kpreempt_enable();
}

#undef NEW_CPU_MSTATE
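
/*
 * Illustrative sketch (condensed, hypothetical): how a syscall path uses
 * syscall_mstate() at the user/system boundary, which is what drives the
 * CMS_USER <-> CMS_SYSTEM cpu transitions described at the top of the file.
 */
static void
example_syscall_boundary(void)
{
        syscall_mstate(LMS_USER, LMS_SYSTEM);   /* entry: user -> system */
        /* ... the system call runs here ... */
        syscall_mstate(LMS_SYSTEM, LMS_USER);   /* exit: system -> user */
}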
/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define LSHIFT  31
#define LSI_ONE ((uint32_t)1 << LSHIFT) /* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;    /* number of calls to exp_x() */
uint_t expx_mul = 0;    /* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
        int i;
        uint64_t ull;
        uint32_t ui;

#ifdef DEBUG
        expx_cnt++;
#endif

        /*
         * By the formula:
         *      exp(-x) = exp(-x/2) * exp(-x/2)
         * we keep halving x until it becomes small enough for
         * the following approximation to be accurate enough:
         *      exp(-x) = 1 - x
         * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
         * Our final error will be smaller than 4% .
         */

        /*
         * Use a uint64_t for the initial shift calculation.
         */
        ull = x >> (LSHIFT-2);

        /*
         * Short circuit:
         * A number this large produces effectively 0 (actually .005).
         * This way, we will never do more than 5 multiplies.
         */
        if (ull >= (1 << 5))
                return (0);

        ui = ull;       /* OK.  Now we can use a uint_t. */
        for (i = 0; ui != 0; i++)
                ui >>= 1;

        if (i != 0) {
#ifdef DEBUG
                expx_mul += i;  /* seldom happens */
#endif
                x >>= i;
        }

        /*
         * Now we compute 1 - x and square it the number of times
         * that we halved x above to produce the final result:
         */
        x = LSI_ONE - x;
        while (i--)
                x = (x * x) >> LSHIFT;

        return (x);
}
/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu: pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
        uint64_t delta = (uint64_t)nsec;

        delta /= cpu_decay_factor;
        return ((pct * exp_x(delta)) >> LSHIFT);
}
/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu: 1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
        return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}
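
/*
 * Illustrative sketch (hypothetical, not used by this file): exercising the
 * five-second design point stated above.  NANOSEC (nanoseconds per second,
 * from sys/time.h) is assumed to be in scope; the function name is invented.
 */
static uint32_t
example_decay_five_seconds(uint32_t pct)
{
        hrtime_t five_sec = 5 * (hrtime_t)NANOSEC;

        /* apply the ~20% five-second decay described in the comment above */
        return (cpu_decay(pct, five_sec));
}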
/*
 * Defined to determine whether a lwp is still on a processor.
 */

#define T_ONPROC(kt) \
        ((kt)->t_mstate < LMS_SLEEP)
#define T_OFFPROC(kt) \
        ((kt)->t_mstate >= LMS_SLEEP)
void
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
        hrtime_t delta;
        hrtime_t hrlb;
        uint_t pctcpu;
        uint_t npctcpu;

        /*
         * This routine can get called at PIL > 0, this *has* to be
         * done atomically.  Holding locks here causes bad things to happen.
         */
        do {
                if (T_ONPROC(t) && t->t_waitrq == 0) {
                        hrlb = t->t_hrtime;
                        delta = newtime - hrlb;
                        if (delta < 0) {
                                newtime = gethrtime_unscaled();
                                delta = newtime - hrlb;
                        }
                        t->t_hrtime = newtime;
                        scalehrtime(&delta);
                        pctcpu = t->t_pctcpu;
                        npctcpu = cpu_grow(pctcpu, delta);
                } else {
                        hrlb = t->t_hrtime;
                        delta = newtime - hrlb;
                        if (delta < 0) {
                                newtime = gethrtime_unscaled();
                                delta = newtime - hrlb;
                        }
                        t->t_hrtime = newtime;
                        scalehrtime(&delta);
                        pctcpu = t->t_pctcpu;
                        npctcpu = cpu_decay(pctcpu, delta);
                }
        } while (atomic_cas_32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);
}
/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * LWP microstate.
 */
int
new_mstate(kthread_t *t, int new_state)
{
        struct mstate *ms;
        unsigned state;
        hrtime_t *mstimep;
        hrtime_t curtime;
        hrtime_t newtime;
        hrtime_t oldtime;
        hrtime_t ztime;
        hrtime_t origstart;
        klwp_t *lwp;

        ASSERT(new_state != LMS_WAIT_CPU);
        ASSERT((unsigned)new_state < NMSTATES);
        ASSERT(t == curthread || THREAD_LOCK_HELD(t));

        /*
         * Don't do microstate processing for threads without a lwp (kernel
         * threads).  Also, if we're an interrupt thread that is pinning another
         * thread, our t_mstate hasn't been initialized.  We'd be modifying the
         * microstate of the underlying lwp which doesn't realize that it's
         * pinned.  In this case, also don't change the microstate.
         */
        if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
                return (LMS_SYSTEM);

        curtime = gethrtime_unscaled();

        /* adjust cpu percentages before we go any further */
        (void) cpu_update_pct(t, curtime);

        ms = &lwp->lwp_mstate;
        state = t->t_mstate;
        origstart = ms->ms_state_start;
        do {
                switch (state) {
                case LMS_TFAULT:
                case LMS_DFAULT:
                case LMS_KFAULT:
                case LMS_USER_LOCK:
                        mstimep = &ms->ms_acct[LMS_SYSTEM];
                        break;
                default:
                        mstimep = &ms->ms_acct[state];
                        break;
                }
                ztime = newtime = curtime - ms->ms_state_start;
                if (newtime < 0) {
                        curtime = gethrtime_unscaled();
                        oldtime = *mstimep - 1; /* force CAS to fail */
                        continue;
                }
                oldtime = *mstimep;
                newtime += oldtime;
                t->t_mstate = new_state;
                ms->ms_state_start = curtime;
        } while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
            oldtime);

        /*
         * When the system boots the initial startup thread will have a
         * ms_state_start of 0 which would add a huge system time to the global
         * zone.  We want to skip aggregating that initial bit of work.
         */
        if (origstart != 0) {
                zone_t *z = ttozone(t);

                if (state == LMS_USER)
                        atomic_add_64(&z->zone_utime, ztime);
                else if (state == LMS_SYSTEM)
                        atomic_add_64(&z->zone_stime, ztime);
        }

        /*
         * Remember the previous running microstate.
         */
        if (state != LMS_SLEEP && state != LMS_STOPPED)
                ms->ms_prev = state;

        /*
         * Switch CPU microstate if appropriate
         */

        kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
        ASSERT(t->t_cpu == CPU);
        if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
                if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
                        new_cpu_mstate(CMS_USER, curtime);
                else if (new_state != LMS_USER &&
                    t->t_cpu->cpu_mstate != CMS_SYSTEM)
                        new_cpu_mstate(CMS_SYSTEM, curtime);
        }
        kpreempt_enable();

        return (ms->ms_prev);
}
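
/*
 * Illustrative sketch (hypothetical, modeled on trap-handler usage): the
 * return value of new_mstate() lets a caller park a thread in a transient
 * state and later restore whatever state preceded it.  The function name
 * is invented for illustration.
 */
static void
example_trap_accounting(kthread_t *t)
{
        int prev;

        prev = new_mstate(t, LMS_TRAP); /* charge time to LMS_TRAP */
        /* ... handle the trap ... */
        (void) new_mstate(t, prev);     /* restore the prior microstate */
}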
/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
        struct mstate *ms;
        hrtime_t *mstimep;
        klwp_t *lwp;
        hrtime_t curtime;
        hrtime_t waitrq;
        hrtime_t newtime;
        hrtime_t oldtime;
        hrtime_t waittime;
        zone_t *z;

        /*
         * Don't call restore mstate of threads without lwps.  (Kernel threads)
         *
         * threads with t_intr set shouldn't be in the dispatcher, so assert
         * that nobody here has t_intr.
         */
        ASSERT(t->t_intr == NULL);

        if ((lwp = ttolwp(t)) == NULL)
                return;

        curtime = gethrtime_unscaled();
        (void) cpu_update_pct(t, curtime);
        ms = &lwp->lwp_mstate;
        ASSERT((unsigned)t->t_mstate < NMSTATES);
        do {
                switch (t->t_mstate) {
                case LMS_SLEEP:
                        /*
                         * Update the timer for the current sleep state.
                         */
                        ASSERT((unsigned)ms->ms_prev < NMSTATES);
                        switch (ms->ms_prev) {
                        case LMS_TFAULT:
                        case LMS_DFAULT:
                        case LMS_KFAULT:
                        case LMS_USER_LOCK:
                                mstimep = &ms->ms_acct[ms->ms_prev];
                                break;
                        default:
                                mstimep = &ms->ms_acct[LMS_SLEEP];
                                break;
                        }
                        /*
                         * Return to the previous run state.
                         */
                        t->t_mstate = ms->ms_prev;
                        break;
                case LMS_STOPPED:
                        mstimep = &ms->ms_acct[LMS_STOPPED];
                        /*
                         * Return to the previous run state.
                         */
                        t->t_mstate = ms->ms_prev;
                        break;
                case LMS_TFAULT:
                case LMS_DFAULT:
                case LMS_KFAULT:
                case LMS_USER_LOCK:
                        mstimep = &ms->ms_acct[LMS_SYSTEM];
                        break;
                default:
                        mstimep = &ms->ms_acct[t->t_mstate];
                        break;
                }
                waitrq = t->t_waitrq;   /* hopefully atomic */
                if (waitrq == 0) {
                        waitrq = curtime;
                }
                t->t_waitrq = 0;
                newtime = waitrq - ms->ms_state_start;
                if (newtime < 0) {
                        curtime = gethrtime_unscaled();
                        oldtime = *mstimep - 1; /* force CAS to fail */
                        continue;
                }
                oldtime = *mstimep;
                newtime += oldtime;
        } while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
            oldtime);

        /*
         * Update the WAIT_CPU timer and per-cpu waitrq total.
         */
        z = ttozone(t);
        waittime = curtime - waitrq;
        ms->ms_acct[LMS_WAIT_CPU] += waittime;
        atomic_add_64(&z->zone_wtime, waittime);
        CPU->cpu_waitrq += waittime;
        ms->ms_state_start = curtime;
}
/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
        struct mstate *ms;
        proc_t *p = ttoproc(t);
        klwp_t *lwp = ttolwp(t);
        int i;
        hrtime_t tmp;

        ASSERT(MUTEX_HELD(&p->p_lock));

        ms = &lwp->lwp_mstate;
        (void) new_mstate(t, LMS_STOPPED);
        ms->ms_term = ms->ms_state_start;
        tmp = ms->ms_term - ms->ms_start;
        scalehrtime(&tmp);
        p->p_mlreal += tmp;
        for (i = 0; i < NMSTATES; i++) {
                tmp = ms->ms_acct[i];
                scalehrtime(&tmp);
                p->p_acct[i] += tmp;
        }
        p->p_ru.minflt += lwp->lwp_ru.minflt;
        p->p_ru.majflt += lwp->lwp_ru.majflt;
        p->p_ru.nswap += lwp->lwp_ru.nswap;
        p->p_ru.inblock += lwp->lwp_ru.inblock;
        p->p_ru.oublock += lwp->lwp_ru.oublock;
        p->p_ru.msgsnd += lwp->lwp_ru.msgsnd;
        p->p_ru.msgrcv += lwp->lwp_ru.msgrcv;
        p->p_ru.nsignals += lwp->lwp_ru.nsignals;
        p->p_ru.nvcsw += lwp->lwp_ru.nvcsw;
        p->p_ru.nivcsw += lwp->lwp_ru.nivcsw;
        p->p_ru.sysc += lwp->lwp_ru.sysc;
        p->p_ru.ioch += lwp->lwp_ru.ioch;
}