/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/inttypes.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/systm.h>
#include <sys/cpc_impl.h>
#include <sys/cpc_pcbe.h>
#include <sys/atomic.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cap_util.h>
#include <asm/clock.h>
#include <sys/xc_levels.h>
static kmutex_t kcpc_ctx_llock[CPC_HASH_BUCKETS];   /* protects ctx_list */
static kcpc_ctx_t *kcpc_ctx_list[CPC_HASH_BUCKETS]; /* head of list */

krwlock_t kcpc_cpuctx_lock;    /* lock for 'kcpc_cpuctx' below */
int kcpc_cpuctx;               /* number of cpu-specific contexts */

int kcpc_counts_include_idle = 1; /* Project Private /etc/system variable */
/*
 * These are set when a PCBE module is loaded.
 */
uint_t cpc_ncounters = 0;
pcbe_ops_t *pcbe_ops = NULL;

/*
 * Statistics on (mis)behavior
 */
static uint32_t kcpc_intrctx_count;  /* # overflows in an interrupt handler */
static uint32_t kcpc_nullctx_count;  /* # overflows in a thread with no ctx */

/*
 * By setting 'kcpc_nullctx_panic' to 1, any overflow interrupts in a thread
 * with no valid context will result in a panic.
 */
static int kcpc_nullctx_panic = 0;
static void kcpc_lwp_create(kthread_t *t, kthread_t *ct);
static void kcpc_restore(kcpc_ctx_t *ctx);
static void kcpc_save(kcpc_ctx_t *ctx);
static void kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx);
static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);
static kcpc_set_t *kcpc_set_create(kcpc_request_t *reqs, int nreqs,
    int set_flags, int kmem_flags);
/*
 * Macros to manipulate context flags. All flag updates should use one of these
 * two macros.
 *
 * Flags should always be updated atomically since some of the updates are
 * not protected by locks.
 */
#define KCPC_CTX_FLAG_SET(ctx, flag) atomic_or_uint(&(ctx)->kc_flags, (flag))
#define KCPC_CTX_FLAG_CLR(ctx, flag) atomic_and_uint(&(ctx)->kc_flags, ~(flag))
/*
 * The IS_HIPIL() macro verifies that the code is executed either from a
 * cross-call or from a high-PIL interrupt.
 */
#define IS_HIPIL() (getpil() >= XCALL_PIL)
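/*
 * Editorial usage sketch (added commentary, not part of the original source):
 * consumers in this file toggle context state through the macros above rather
 * than writing kc_flags directly, e.g.
 *
 *	KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);	stop virtual counting
 *	...
 *	KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);	resume counting
 *
 * Because both expand to atomic bit operations on kc_flags, an interrupt
 * handler or a cross-call on another CPU can update other bits of the same
 * word concurrently without either update being lost.
 */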
extern int kcpc_hw_load_pcbe(void);

/*
 * Return value from kcpc_hw_load_pcbe()
 */
static int kcpc_pcbe_error = 0;

/*
 * Perform one-time initialization of kcpc framework.
 * This function performs the initialization only the first time it is called.
 * It is safe to call it multiple times.
 */
    static uint32_t kcpc_initialized = 0;

    /*
     * We already tried loading platform pcbe module and failed
     */
    if (kcpc_pcbe_error != 0)

    /*
     * The kcpc framework should be initialized at most once
     */
    if (atomic_cas_32(&kcpc_initialized, 0, 1) != 0)

    rw_init(&kcpc_cpuctx_lock, NULL, RW_DEFAULT, NULL);
    for (hash = 0; hash < CPC_HASH_BUCKETS; hash++)
        mutex_init(&kcpc_ctx_llock[hash],
            NULL, MUTEX_DRIVER, (void *)(uintptr_t)15);

    /*
     * Load platform-specific pcbe module
     */
    kcpc_pcbe_error = kcpc_hw_load_pcbe();

    return (kcpc_pcbe_error == 0 ? 0 : -1);
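/*
 * Editorial sketch (added, not in the original): the one-time-initialization
 * guard above is the usual atomic compare-and-swap gate, roughly
 *
 *	static uint32_t initialized = 0;
 *
 *	if (atomic_cas_32(&initialized, 0, 1) != 0)
 *		return (0);	later callers bail out early (assumed value)
 *	... perform the real setup exactly once ...
 *
 * which is why the comment says it is safe to call the function multiple
 * times. Only the final return of kcpc_pcbe_error is part of the fragment
 * shown above; the early-return value is an assumption.
 */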
kcpc_register_pcbe(pcbe_ops_t *ops)

    cpc_ncounters = pcbe_ops->pcbe_ncounters();

kcpc_register_dcpc(void (*func)(uint64_t))

    dtrace_cpc_fire = func;

kcpc_unregister_dcpc(void)

    dtrace_cpc_fire = NULL;
kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)

    ctx = kcpc_ctx_alloc(KM_SLEEP);

    if (kcpc_assign_reqs(set, ctx) != 0) {
        *subcode = CPC_RESOURCE_UNAVAIL;

    ctx->kc_cpuid = cpuid;
    ctx->kc_thread = curthread;

    set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

    if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
        kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));

    /*
     * We must hold cpu_lock to prevent DR, offlining, or unbinding while
     * we are manipulating the cpu_t and programming the hardware, else
     * the cpu_t could go away while we're looking at it.
     */
    mutex_enter(&cpu_lock);

    /*
     * The CPU could have been DR'd out while we were getting set up.
     */
    mutex_enter(&cp->cpu_cpc_ctxlock);

    save_spl = spl_xcall();

    /*
     * Check to see whether counters for CPU already being used by someone
     * other than kernel for capacity and utilization (since kernel will
     * let go of counters for user in kcpc_program() below)
     */
    if (cp->cpu_cpc_ctx != NULL && !CU_CPC_ON(cp)) {
        /*
         * If this CPU already has a bound set, return an error.
         */
        mutex_exit(&cp->cpu_cpc_ctxlock);

    if (curthread->t_bind_cpu != cpuid) {
        mutex_exit(&cp->cpu_cpc_ctxlock);

    kcpc_program(ctx, B_FALSE, B_TRUE);

    mutex_exit(&cp->cpu_cpc_ctxlock);
    mutex_exit(&cpu_lock);

    mutex_enter(&set->ks_lock);
    set->ks_state |= KCPC_SET_BOUND;
    cv_signal(&set->ks_condv);
    mutex_exit(&set->ks_lock);

    mutex_exit(&cpu_lock);

    kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
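/*
 * Editorial sketch (added, not from the original source): the expected caller
 * flow for a CPU-bound set, assuming the caller has already built a
 * kcpc_set_t and bound itself to the target CPU:
 *
 *	int subcode;
 *
 *	if (kcpc_bind_cpu(set, cpuid, &subcode) != 0)
 *		... inspect subcode, e.g. CPC_RESOURCE_UNAVAIL ...
 *	... later, kcpc_sample() to read; kcpc_unbind() to tear down ...
 *
 * The failure paths above free ks_data before returning, so a set that
 * failed to bind is presumably safe for the caller to reuse or free.
 */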
kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)

    /*
     * Only one set is allowed per context, so ensure there is no
     * existing context.
     */
    if (t->t_cpc_ctx != NULL)

    ctx = kcpc_ctx_alloc(KM_SLEEP);

    /*
     * The context must begin life frozen until it has been properly
     * programmed onto the hardware. This prevents the context ops from
     * worrying about it until we're ready.
     */
    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
    ctx->kc_hrtime = gethrtime();

    if (kcpc_assign_reqs(set, ctx) != 0) {
        *subcode = CPC_RESOURCE_UNAVAIL;

    if (set->ks_flags & CPC_BIND_LWP_INHERIT)
        KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_LWPINHERIT);

    /*
     * Permit threads to look at their own hardware counters from userland.
     */
    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_NONPRIV);

    /*
     * Create the data store for this set.
     */
    set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

    if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
        kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));

    /*
     * Add a device context to the subject thread.
     */
    installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
        kcpc_lwp_create, NULL, kcpc_free);

    /*
     * Ask the backend to program the hardware.
     */
    if (t == curthread) {
        save_spl = spl_xcall();
        kcpc_program(ctx, B_TRUE, B_TRUE);

        /*
         * Since we are the agent LWP, we know the victim LWP is stopped
         * until we're done here; no need to worry about preemption or
         * migration here. We still use an atomic op to clear the flag
         * to ensure the flags are always self-consistent; they can
         * still be accessed from, for instance, another CPU doing a
         * kcpc_invalidate_all().
         */
        KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);

    mutex_enter(&set->ks_lock);
    set->ks_state |= KCPC_SET_BOUND;
    cv_signal(&set->ks_condv);
    mutex_exit(&set->ks_lock);
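/*
 * Editorial sketch (added, not from the original): binding a set to a thread
 * follows the same shape as the CPU case, with the context installed as a
 * context op so kcpc_save()/kcpc_restore() run at every switch:
 *
 *	int subcode;
 *
 *	set->ks_flags |= CPC_BIND_LWP_INHERIT;	optional: children inherit
 *	if (kcpc_bind_thread(set, curthread, &subcode) != 0)
 *		... handle the failure ...
 *
 * When t == curthread the counters are programmed immediately at raised PIL;
 * otherwise the agent-LWP path relies on the victim being stopped, as the
 * comment above explains.
 */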
/*
 * Walk through each request in the set and ask the PCBE to configure a
 * corresponding counter.
 */
kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode)

    for (i = 0; i < set->ks_nreqs; i++) {
        rp = &set->ks_req[i];

        ASSERT(n >= 0 && n < cpc_ncounters);
        ASSERT(ctx->kc_pics[n].kp_req == NULL);

        if (rp->kr_flags & CPC_OVF_NOTIFY_EMT) {
            if ((pcbe_ops->pcbe_caps & CPC_CAP_OVERFLOW_INTERRUPT)

            /*
             * If any of the counters have requested overflow
             * notification, we flag the context as being one that
             * cares about overflow.
             */
            KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_SIGOVF);

        rp->kr_config = NULL;
        if ((ret = pcbe_ops->pcbe_configure(n, rp->kr_event,
            rp->kr_preset, rp->kr_flags, rp->kr_nattrs, rp->kr_attr,
            &(rp->kr_config), (void *)ctx)) != 0) {
            kcpc_free_configs(set);

            case CPC_ATTR_REQUIRES_PRIVILEGE:
            case CPC_HV_NO_ACCESS:

        ctx->kc_pics[n].kp_req = rp;
        rp->kr_picp = &ctx->kc_pics[n];
        rp->kr_data = set->ks_data + rp->kr_index;
        *rp->kr_data = rp->kr_preset;

kcpc_free_configs(kcpc_set_t *set)

    for (i = 0; i < set->ks_nreqs; i++)
        if (set->ks_req[i].kr_config != NULL)
            pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
/*
 * buf points to a user address and the data should be copied out to that
 * address in the current process.
 */
kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)

    kcpc_ctx_t *ctx = set->ks_ctx;

    mutex_enter(&set->ks_lock);
    if ((set->ks_state & KCPC_SET_BOUND) == 0) {
        mutex_exit(&set->ks_lock);
    mutex_exit(&set->ks_lock);

    /*
     * Kernel preemption must be disabled while reading the hardware regs,
     * and if this is a CPU-bound context, while checking the CPU binding of
     * the current thread.
     */
    save_spl = spl_xcall();

    if (ctx->kc_flags & KCPC_CTX_INVALID) {

    if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) {
        if (ctx->kc_cpuid != -1) {
            if (curthread->t_bind_cpu != ctx->kc_cpuid) {

        if (ctx->kc_thread == curthread) {
            uint64_t curtick = KCPC_GET_TICK();

            ctx->kc_hrtime = gethrtime_waitfree();
            pcbe_ops->pcbe_sample(ctx);
            ctx->kc_vtick += curtick - ctx->kc_rawtick;
            ctx->kc_rawtick = curtick;

        /*
         * The config may have been invalidated by
         * the pcbe_sample op.
         */
        if (ctx->kc_flags & KCPC_CTX_INVALID) {

    if (copyout(set->ks_data, buf,
        set->ks_nreqs * sizeof (uint64_t)) == -1)
    if (copyout(&ctx->kc_hrtime, hrtime, sizeof (uint64_t)) == -1)
    if (copyout(&ctx->kc_vtick, tick, sizeof (uint64_t)) == -1)
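/*
 * Editorial note (added): the sampling buffers are laid out so that
 * set->ks_data is an array of ks_nreqs uint64_t values and each request's
 * kr_data points at ks_data + kr_index (see kcpc_configure_reqs() above).
 * kcpc_sample() therefore copies three things out to user addresses:
 *
 *	(void) kcpc_sample(set, data_uaddr, hrtime_uaddr, tick_uaddr);
 *
 * The user-space address names in this sketch are illustrative only; the
 * function itself just sees the raw pointers handed down by its caller,
 * presumably the cpc driver's ioctl path.
 */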
/*
 * Stop the counters on the CPU this context is bound to.
 */
kcpc_stop_hw(kcpc_ctx_t *ctx)

    if (ctx->kc_cpuid == CPU->cpu_id) {

    cp = cpu_get(ctx->kc_cpuid);

    ASSERT(cp != NULL && cp->cpu_cpc_ctx == ctx);
    kcpc_cpu_stop(cp, B_FALSE);
kcpc_unbind(kcpc_set_t *set)

    /*
     * We could be racing with the process's agent thread as it
     * binds the set; we must wait for the set to finish binding
     * before attempting to tear it down.
     */
    mutex_enter(&set->ks_lock);
    while ((set->ks_state & KCPC_SET_BOUND) == 0)
        cv_wait(&set->ks_condv, &set->ks_lock);
    mutex_exit(&set->ks_lock);

    /*
     * Use kc_lock to synchronize with kcpc_restore().
     */
    mutex_enter(&ctx->kc_lock);
    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
    mutex_exit(&ctx->kc_lock);

    if (ctx->kc_cpuid == -1) {
        /*
         * The context is thread-bound and therefore has a device
         * context. It will be freed via removectx() calling
         * freectx() calling kcpc_free().
         */
        if (t == curthread) {
            save_spl = spl_xcall();
            if (!(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED))
                kcpc_unprogram(ctx, B_TRUE);

        if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
            kcpc_lwp_create, NULL, kcpc_free) == 0)
            panic("kcpc_unbind: context %p not preset on thread %p",
                (void *)ctx, (void *)t);

        (void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
            kcpc_lwp_create, NULL, kcpc_free);

        /*
         * If we are unbinding a CPU-bound set from a remote CPU, the
         * native CPU's idle thread could be in the midst of programming
         * this context onto the CPU. We grab the context's lock here to
         * ensure that the idle thread is done with it. When we release
         * the lock, the CPU no longer has a context and the idle thread
         *
         * cpu_lock must be held to prevent the CPU from being DR'd out
         * while we disassociate the context from the cpu_t.
         */
        mutex_enter(&cpu_lock);
        cp = cpu_get(ctx->kc_cpuid);

        /*
         * The CPU may have been DR'd out of the system.
         */
        mutex_enter(&cp->cpu_cpc_ctxlock);
        if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)

        ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
        mutex_exit(&cp->cpu_cpc_ctxlock);

        mutex_exit(&cpu_lock);
        if (ctx->kc_thread == curthread) {
            curthread->t_cpc_set = NULL;
kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)

    ASSERT(set->ks_state & KCPC_SET_BOUND);
    ASSERT(set->ks_ctx->kc_thread == curthread);
    ASSERT(set->ks_ctx->kc_cpuid == -1);

    if (index < 0 || index >= set->ks_nreqs)

    for (i = 0; i < set->ks_nreqs; i++)
        if (set->ks_req[i].kr_index == index)

    ASSERT(i != set->ks_nreqs);

    set->ks_req[i].kr_preset = preset;
kcpc_restart(kcpc_set_t *set)

    kcpc_ctx_t *ctx = set->ks_ctx;

    ASSERT(set->ks_state & KCPC_SET_BOUND);
    ASSERT(ctx->kc_thread == curthread);
    ASSERT(ctx->kc_cpuid == -1);

    for (i = 0; i < set->ks_nreqs; i++) {
        *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
        pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
            0, 0, NULL, &set->ks_req[i].kr_config, NULL);

    save_spl = spl_xcall();

    /*
     * If the user is doing this on a running set, make sure the counters
     * are stopped first.
     */
    if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
        pcbe_ops->pcbe_allstop();

    /*
     * Ask the backend to program the hardware.
     */
    ctx->kc_rawtick = KCPC_GET_TICK();
    KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
    pcbe_ops->pcbe_program(ctx);
/*
 * Caller must hold kcpc_cpuctx_lock.
 */
kcpc_enable(kthread_t *t, int cmd, int enable)

    kcpc_ctx_t *ctx = t->t_cpc_ctx;
    kcpc_set_t *set = t->t_cpc_set;

    ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

    /*
     * This thread has a set but no context; it must be a
     * CPU-bound set.
     */
    ASSERT(t->t_cpc_set != NULL);
    ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);

    } else if (ctx->kc_flags & KCPC_CTX_INVALID)

    if (cmd == CPC_ENABLE) {
        if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)

        KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);

    } else if (cmd == CPC_DISABLE) {
        if (ctx->kc_flags & KCPC_CTX_FREEZE)

        KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);

    } else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
        /*
         * Strategy for usr/sys: stop counters and update set's presets
         * with current counter values, unbind, update requests with
         * new config, then re-bind.
         */
        flag = (cmd == CPC_USR_EVENTS) ?
            CPC_COUNT_USER : CPC_COUNT_SYSTEM;

        KCPC_CTX_FLAG_SET(ctx,
            KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
        pcbe_ops->pcbe_allstop();

        for (i = 0; i < set->ks_nreqs; i++) {
            set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);

            set->ks_req[i].kr_flags |= flag;

            set->ks_req[i].kr_flags &= ~flag;

        newset = kcpc_dup_set(set);
        if (kcpc_unbind(set) != 0)

        t->t_cpc_set = newset;
        if (kcpc_bind_thread(newset, t, &err) != 0) {
            kcpc_free_set(newset);
/*
 * Provide PCBEs with a way of obtaining the configs of every counter which will
 * be programmed together.
 *
 * If current is NULL, provide the first config.
 *
 * If data != NULL, caller wants to know where the data store associated with
 * the config we return is located.
 */
kcpc_next_config(void *token, void *current, uint64_t **data)

    kcpc_ctx_t *ctx = (kcpc_ctx_t *)token;

    if (current == NULL) {
        /*
         * Client would like the first config, which may not be in
         * counter 0; we need to search through the counters for the
         * first config.
         */
        for (i = 0; i < cpc_ncounters; i++)
            if (ctx->kc_pics[i].kp_req != NULL)

        /*
         * There are no counters configured for the given context.
         */
        if (i == cpc_ncounters)

        /*
         * There surely is a faster way to do this.
         */
        for (i = 0; i < cpc_ncounters; i++) {
            pic = &ctx->kc_pics[i];

            if (pic->kp_req != NULL &&
                current == pic->kp_req->kr_config)

        /*
         * We found the current config at picnum i. Now search for the
         * next configured PIC.
         */
        for (i++; i < cpc_ncounters; i++) {
            pic = &ctx->kc_pics[i];
            if (pic->kp_req != NULL)

        if (i == cpc_ncounters)

    *data = ctx->kc_pics[i].kp_req->kr_data;

    return (ctx->kc_pics[i].kp_req->kr_config);
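/*
 * Editorial sketch (added): a PCBE that needs to see all configs programmed
 * together would typically iterate with kcpc_next_config() like this
 * (variable names are illustrative only):
 *
 *	uint64_t *data;
 *	void *cfg;
 *
 *	for (cfg = kcpc_next_config(token, NULL, &data); cfg != NULL;
 *	    cfg = kcpc_next_config(token, cfg, &data))
 *		... examine cfg; data tells us where its counts land ...
 *
 * passing NULL to get the first config and the previous return value to get
 * the next, with exhaustion signalled once no further configured PIC is
 * found, as implied by the checks above.
 */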
kcpc_ctx_alloc(int kmem_flags)

    ctx = (kcpc_ctx_t *)kmem_zalloc(sizeof (kcpc_ctx_t), kmem_flags);

    hash = CPC_HASH_CTX(ctx);
    mutex_enter(&kcpc_ctx_llock[hash]);
    ctx->kc_next = kcpc_ctx_list[hash];
    kcpc_ctx_list[hash] = ctx;
    mutex_exit(&kcpc_ctx_llock[hash]);

    ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) *
        cpc_ncounters, KM_SLEEP);
/*
 * Copy set from ctx to the child context, cctx, if it has CPC_BIND_LWP_INHERIT
 * set.
 */
kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)

    kcpc_set_t *ks = ctx->kc_set, *cks;

    if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0)

    cks = kmem_zalloc(sizeof (*cks), KM_SLEEP);
    cks->ks_state &= ~KCPC_SET_BOUND;

    cks->ks_flags = ks->ks_flags;
    cks->ks_nreqs = ks->ks_nreqs;
    cks->ks_req = kmem_alloc(cks->ks_nreqs *
        sizeof (kcpc_request_t), KM_SLEEP);
    cks->ks_data = kmem_alloc(cks->ks_nreqs * sizeof (uint64_t),

    for (i = 0; i < cks->ks_nreqs; i++) {
        cks->ks_req[i].kr_index = ks->ks_req[i].kr_index;
        cks->ks_req[i].kr_picnum = ks->ks_req[i].kr_picnum;
        (void) strncpy(cks->ks_req[i].kr_event,
            ks->ks_req[i].kr_event, CPC_MAX_EVENT_LEN);
        cks->ks_req[i].kr_preset = ks->ks_req[i].kr_preset;
        cks->ks_req[i].kr_flags = ks->ks_req[i].kr_flags;
        cks->ks_req[i].kr_nattrs = ks->ks_req[i].kr_nattrs;
        if (ks->ks_req[i].kr_nattrs > 0) {
            cks->ks_req[i].kr_attr =
                kmem_alloc(ks->ks_req[i].kr_nattrs *
                sizeof (kcpc_attr_t), KM_SLEEP);

        for (j = 0; j < ks->ks_req[i].kr_nattrs; j++) {
            (void) strncpy(cks->ks_req[i].kr_attr[j].ka_name,
                ks->ks_req[i].kr_attr[j].ka_name,
            cks->ks_req[i].kr_attr[j].ka_val =
                ks->ks_req[i].kr_attr[j].ka_val;

    if (kcpc_configure_reqs(cctx, cks, &code) != 0)
        kcpc_invalidate_config(cctx);

    mutex_enter(&cks->ks_lock);
    cks->ks_state |= KCPC_SET_BOUND;
    cv_signal(&cks->ks_condv);
    mutex_exit(&cks->ks_lock);
kcpc_ctx_free(kcpc_ctx_t *ctx)

    long hash = CPC_HASH_CTX(ctx);

    mutex_enter(&kcpc_ctx_llock[hash]);
    loc = &kcpc_ctx_list[hash];
    ASSERT(*loc != NULL);

        loc = &(*loc)->kc_next;

    mutex_exit(&kcpc_ctx_llock[hash]);

    kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t));
    cv_destroy(&ctx->kc_condv);
    mutex_destroy(&ctx->kc_lock);
    kmem_free(ctx, sizeof (*ctx));
/*
 * Generic interrupt handler used on hardware that generates
 * overflow interrupts.
 *
 * Note: executed at high-level interrupt context!
 */
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)

    kthread_t *t = curthread;

    /*
     * On both x86 and UltraSPARC, we may deliver the high-level
     * interrupt in kernel mode, just after we've started to run an
     * interrupt thread. (That's because the hardware helpfully
     * delivers the overflow interrupt some random number of cycles
     * after the instruction that caused the overflow, by which time
     * we're in some part of the kernel, not necessarily running on
     * the right thread.)
     *
     * Check for this case here -- find the pinned thread
     * that was running when the interrupt went off.
     */
    if (t->t_flag & T_INTR_THREAD) {
        atomic_inc_32(&kcpc_intrctx_count);

        /*
         * Note that t_lwp is always set to point at the underlying
         * thread, thus this will work in the presence of nested
         * interrupts.
         */
        if ((lwp = t->t_lwp) != NULL) {

        /*
         * This can easily happen if we're using the counters in
         * "shared" mode, for example, and an overflow interrupt
         * occurs while we are running cpustat. In that case, the
         * bound thread that has the context that belongs to this
         * CPU is almost certainly sleeping (if it was running on
         * the CPU we'd have found it above), and the actual
         * interrupted thread has no knowledge of performance counters!
         */
        ctx = curthread->t_cpu->cpu_cpc_ctx;

        /*
         * Return the bound context for this CPU to
         * the interrupt handler so that it can synchronously
         * sample the hardware counters and restart them.
         */

        /*
         * As long as the overflow interrupt really is delivered early
         * enough after trapping into the kernel to avoid switching
         * threads, we must always be able to find the cpc context,
         * or something went terribly wrong, i.e. we ended up
         * running a passivated interrupt thread, a kernel
         * thread or we interrupted idle, all of which are Very Bad.
         *
         * We also could end up here owing to an incredibly unlikely
         * race condition that exists on x86 based architectures when
         * the cpc provider is in use; overflow interrupts are directed
         * to the cpc provider if the 'dtrace_cpc_in_use' variable is
         * set when we enter the handler. This variable is unset after
         * overflow interrupts have been disabled on all CPUs and all
         * contexts have been torn down. To stop interrupts, the cpc
         * provider issues an xcall to the remote CPU before it tears
         * down that CPU's context. As high priority xcalls, on an x86
         * architecture, execute at a higher PIL than this handler, it
         * is possible (though extremely unlikely) that the xcall could
         * interrupt the overflow handler before the handler has
         * checked the 'dtrace_cpc_in_use' variable, stop the counters,
         * return to the cpc provider which could then rip down
         * contexts and unset 'dtrace_cpc_in_use' *before* the CPU's
         * overflow handler has had a chance to check the variable. In
         * that case, the handler would direct the overflow into this
         * code and no valid context will be found. The default behavior
         * when no valid context is found is now to shout a warning to
         * the console and bump the 'kcpc_nullctx_count' variable.
         */
        if (kcpc_nullctx_panic)
            panic("null cpc context, thread %p", (void *)t);

        cmn_err(CE_NOTE,
            "null cpc context found in overflow handler!\n");
        atomic_inc_32(&kcpc_nullctx_count);
    } else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
        /*
         * Schedule an ast to sample the counters, which will
         * propagate any overflow into the virtualized performance
         * counter(s), and may deliver a signal.
         */
        ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;

        /*
         * If a counter has overflowed which was counting on behalf of
         * a request which specified CPC_OVF_NOTIFY_EMT, send the
         */
        for (i = 0; i < cpc_ncounters; i++) {
            if (ctx->kc_pics[i].kp_req != NULL &&
                bitmap & (1 << i) &&
                ctx->kc_pics[i].kp_req->kr_flags &
                CPC_OVF_NOTIFY_EMT) {
                /*
                 * A signal has been requested for this PIC, so
                 * freeze the context. The interrupt handler
                 * has already stopped the counter hardware.
                 */
                KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
                atomic_or_uint(&ctx->kc_pics[i].kp_flags,
                    KCPC_PIC_OVERFLOWED);

    } else if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {
        /*
         * Thread context is no longer valid, but there may be a valid
         * CPU context.
         */
        return (curthread->t_cpu->cpu_cpc_ctx);
/*
 * The current thread context had an overflow interrupt; we're
 * executing here in high-level interrupt context.
 */
kcpc_hw_overflow_intr(caddr_t arg1, caddr_t arg2)

    if (pcbe_ops == NULL ||
        (bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0)
        return (DDI_INTR_UNCLAIMED);

    /*
     * Prevent any further interrupts.
     */
    pcbe_ops->pcbe_allstop();

    if (dtrace_cpc_in_use) {
        state = &cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state;

        /*
         * Set the per-CPU state bit to indicate that we are currently
         * processing an interrupt if it is currently free. Drop the
         * interrupt if the state isn't free (i.e. a configuration
         * event is taking place).
         */
        if (atomic_cas_8(state, DCPC_INTR_FREE,
            DCPC_INTR_PROCESSING) == DCPC_INTR_FREE) {

            ASSERT(dtrace_cpc_fire != NULL);

            (*dtrace_cpc_fire)(bitmap);

            ctx = curthread->t_cpu->cpu_cpc_ctx;

                cmn_err(CE_NOTE, "null cpc context in "
                    "hardware overflow handler!\n");
                return (DDI_INTR_CLAIMED);

            /* Reset any counters that have overflowed */
            for (i = 0; i < ctx->kc_set->ks_nreqs; i++) {
                req = ctx->kc_set->ks_req[i];

                if (bitmap & (1 << req.kr_picnum)) {
                    pcbe_ops->pcbe_configure(req.kr_picnum,
                        req.kr_event, req.kr_preset,
                        req.kr_flags, req.kr_nattrs,
                        req.kr_attr, &(req.kr_config),

            pcbe_ops->pcbe_program(ctx);

            /*
             * We've finished processing the interrupt so set
             * the state back to free.
             */
            cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state =

        return (DDI_INTR_CLAIMED);

    /*
     * DTrace isn't involved so pass on accordingly.
     *
     * If the interrupt has occurred in the context of an lwp owning
     * the counters, then the handler posts an AST to the lwp to
     * trigger the actual sampling, and optionally deliver a signal or
     * restart the counters, on the way out of the kernel using
     * kcpc_hw_overflow_ast() (see below).
     *
     * On the other hand, if the handler returns the context to us
     * directly, then it means that there are no other threads in
     * the middle of updating it, no AST has been posted, and so we
     * should sample the counters here, and restart them with no
     * further fuss.
     *
     * The CPU's CPC context may disappear as a result of cross-call which
     * has higher PIL on x86, so protect the context by raising PIL to the
     * cross-call level.
     */
    save_spl = spl_xcall();
    if ((ctx = kcpc_overflow_intr(arg1, bitmap)) != NULL) {
        uint64_t curtick = KCPC_GET_TICK();

        ctx->kc_hrtime = gethrtime_waitfree();
        ctx->kc_vtick += curtick - ctx->kc_rawtick;
        ctx->kc_rawtick = curtick;
        pcbe_ops->pcbe_sample(ctx);
        pcbe_ops->pcbe_program(ctx);

    return (DDI_INTR_CLAIMED);
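/*
 * Editorial summary (added, not from the original): the overflow delivery
 * path visible above is, in order:
 *
 *	hardware overflow -> kcpc_hw_overflow_intr()
 *	    -> pcbe_overflow_bitmap() to find which counters overflowed
 *	    -> pcbe_allstop() to quiesce the hardware
 *	    -> either dtrace_cpc_fire() (the DTrace cpc provider owns the
 *	       event) or kcpc_overflow_intr(), which posts an AST
 *	       (CPC_OVERFLOW in pcb_flags) for the owning lwp, or hands back
 *	       the CPU-bound context for an immediate sample-and-reprogram.
 *
 * The AST is later consumed by the trap() path shown next.
 */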
/*
 * Called from trap() when processing the ast posted by the high-level
 * interrupt handler.
 */
    kcpc_ctx_t *ctx = curthread->t_cpc_ctx;

    uint64_t curtick = KCPC_GET_TICK();

    ASSERT(ctx != NULL); /* Beware of interrupt skid. */

    /*
     * An overflow happened: sample the context to ensure that
     * the overflow is propagated into the upper bits of the
     * virtualized 64-bit counter(s).
     */
    ctx->kc_hrtime = gethrtime_waitfree();
    pcbe_ops->pcbe_sample(ctx);

    ctx->kc_vtick += curtick - ctx->kc_rawtick;

    /*
     * The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED
     * if that pic generated an overflow and if the request it was counting
     * on behalf of had CPC_OVERFLOW_REQUEST specified. We go through all
     * pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we
     * found any overflowed pics, keep the context frozen and return true
     * (thus causing a signal to be sent).
     */
    for (i = 0; i < cpc_ncounters; i++) {
        if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) {
            atomic_and_uint(&ctx->kc_pics[i].kp_flags,
                ~KCPC_PIC_OVERFLOWED);

    /*
     * Otherwise, re-enable the counters and continue life as before.
     */
    KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
    pcbe_ops->pcbe_program(ctx);
/*
 * Called when switching away from current thread.
 */
kcpc_save(kcpc_ctx_t *ctx)

    save_spl = spl_xcall();

    if (ctx->kc_flags & KCPC_CTX_INVALID) {
        if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {

        /*
         * This context has been invalidated but the counters have not
         * been stopped. Stop them here and mark the context stopped.
         */
        kcpc_unprogram(ctx, B_TRUE);

    pcbe_ops->pcbe_allstop();
    if (ctx->kc_flags & KCPC_CTX_FREEZE) {

    /*
     * Need to sample for all reqs into each req's current mpic.
     */
    ctx->kc_hrtime = gethrtime_waitfree();
    ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick;
    pcbe_ops->pcbe_sample(ctx);

    /*
     * Program counter for measuring capacity and utilization since user
     * thread isn't using counter anymore
     */
    ASSERT(ctx->kc_cpuid == -1);
    cu_cpc_program(CPU, &err);
kcpc_restore(kcpc_ctx_t *ctx)

    mutex_enter(&ctx->kc_lock);

    if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) ==
        KCPC_CTX_INVALID)
        /*
         * The context is invalidated but has not been marked stopped.
         * We mark it as such here because we will not start the
         * counters during this context switch.
         */
        KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED);

    if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE)) {
        mutex_exit(&ctx->kc_lock);

    /*
     * Set kc_flags to show that a kcpc_restore() is in progress to avoid
     * ctx & set related memory objects being freed without us knowing.
     * This can happen if an agent thread is executing a kcpc_unbind(),
     * with this thread as the target, whilst we're concurrently doing a
     * restorectx() during, for example, a proc_exit(). Effectively, by
     * doing this, we're asking kcpc_free() to cv_wait() until
     * kcpc_restore() has completed.
     */
    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_RESTORE);
    mutex_exit(&ctx->kc_lock);

    /*
     * While programming the hardware, the counters should be stopped. We
     * don't do an explicit pcbe_allstop() here because they should have
     * been stopped already by the last consumer.
     */
    save_spl = spl_xcall();
    kcpc_program(ctx, B_TRUE, B_TRUE);

    /*
     * Wake the agent thread if it's waiting in kcpc_free().
     */
    mutex_enter(&ctx->kc_lock);
    KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_RESTORE);
    cv_signal(&ctx->kc_condv);
    mutex_exit(&ctx->kc_lock);
/*
 * If kcpc_counts_include_idle is set to 0 by the sys admin, we add the
 * following context operators to the idle thread on each CPU. They stop the
 * counters when the idle thread is switched on, and they start them again when
 * it is switched off.
 */
kcpc_idle_save(struct cpu *cp)

    /*
     * The idle thread shouldn't be run anywhere else.
     */

    /*
     * We must hold the CPU's context lock to ensure the context isn't freed
     * while we're looking at it.
     */
    mutex_enter(&cp->cpu_cpc_ctxlock);

    if ((cp->cpu_cpc_ctx == NULL) ||
        (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
        mutex_exit(&cp->cpu_cpc_ctxlock);

    pcbe_ops->pcbe_program(cp->cpu_cpc_ctx);
    mutex_exit(&cp->cpu_cpc_ctxlock);

kcpc_idle_restore(struct cpu *cp)

    /*
     * The idle thread shouldn't be run anywhere else.
     */

    /*
     * We must hold the CPU's context lock to ensure the context isn't freed
     * while we're looking at it.
     */
    mutex_enter(&cp->cpu_cpc_ctxlock);

    if ((cp->cpu_cpc_ctx == NULL) ||
        (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
        mutex_exit(&cp->cpu_cpc_ctxlock);

    pcbe_ops->pcbe_allstop();
    mutex_exit(&cp->cpu_cpc_ctxlock);
kcpc_lwp_create(kthread_t *t, kthread_t *ct)

    kcpc_ctx_t *ctx = t->t_cpc_ctx, *cctx;

    if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0)

    rw_enter(&kcpc_cpuctx_lock, RW_READER);
    if (ctx->kc_flags & KCPC_CTX_INVALID) {
        rw_exit(&kcpc_cpuctx_lock);

    cctx = kcpc_ctx_alloc(KM_SLEEP);
    kcpc_ctx_clone(ctx, cctx);
    rw_exit(&kcpc_cpuctx_lock);

    /*
     * Copy the parent context's kc_flags field, but don't overwrite
     * the child's in case it was modified during kcpc_ctx_clone.
     */
    KCPC_CTX_FLAG_SET(cctx, ctx->kc_flags);
    cctx->kc_thread = ct;
    cctx->kc_cpuid = -1;
    ct->t_cpc_set = cctx->kc_set;
    ct->t_cpc_ctx = cctx;

    if (cctx->kc_flags & KCPC_CTX_SIGOVF) {
        kcpc_set_t *ks = cctx->kc_set;

        /*
         * Our contract with the user requires us to immediately send an
         * overflow signal to all children if we have the LWPINHERIT
         * and SIGOVF flags set. In addition, all counters should be
         * set to UINT64_MAX, and their pic's overflow flag turned on
         * so that our trap() processing knows to send a signal.
         */
        KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
        for (i = 0; i < ks->ks_nreqs; i++) {
            kcpc_request_t *kr = &ks->ks_req[i];

            if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) {
                *(kr->kr_data) = UINT64_MAX;
                atomic_or_uint(&kr->kr_picp->kp_flags,
                    KCPC_PIC_OVERFLOWED);

        ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;

    installctx(ct, cctx, kcpc_save, kcpc_restore,
        NULL, kcpc_lwp_create, NULL, kcpc_free);
/*
 * Counter Stoppage Theory
 *
 * The counters may need to be stopped properly at the following occasions:
 *
 * 1) An LWP exits.
 * 2) A thread exits.
 * 3) An LWP performs an exec().
 * 4) A bound set is unbound.
 *
 * In addition to stopping the counters, the CPC context (a kcpc_ctx_t) may need
 * to be freed as well.
 *
 * Case 1: kcpc_passivate(), called via lwp_exit(), stops the counters. Later on
 * when the thread is freed, kcpc_free(), called by freectx(), frees the
 * context.
 *
 * Case 2: same as case 1 except kcpc_passivate is called from thread_exit().
 *
 * Case 3: kcpc_free(), called via freectx() via exec(), recognizes that it has
 * been called from exec. It stops the counters _and_ frees the context.
 *
 * Case 4: kcpc_unbind() stops the hardware _and_ frees the context.
 *
 * CPU-bound counters are always stopped via kcpc_unbind().
 */
/*
 * We're being called to delete the context; we ensure that all associated data
 * structures are freed, and that the hardware is passivated if this is an exec.
 */
kcpc_free(kcpc_ctx_t *ctx, int isexec)

    kcpc_set_t *set = ctx->kc_set;

    ASSERT(set != NULL);

    /*
     * Wait for kcpc_restore() to finish before we tear things down.
     */
    mutex_enter(&ctx->kc_lock);
    while (ctx->kc_flags & KCPC_CTX_RESTORE)
        cv_wait(&ctx->kc_condv, &ctx->kc_lock);
    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
    mutex_exit(&ctx->kc_lock);

        /*
         * This thread is execing, and after the exec it should not have
         * any performance counter context. Stop the counters properly
         * here so the system isn't surprised by an overflow interrupt
         * later.
         */
        if (ctx->kc_cpuid != -1) {
            /*
             * CPU-bound context; stop the appropriate CPU's ctrs.
             * Hold cpu_lock while examining the CPU to ensure it
             * doesn't go away.
             */
            mutex_enter(&cpu_lock);
            cp = cpu_get(ctx->kc_cpuid);

            /*
             * The CPU could have been DR'd out, so only stop the
             * CPU and clear its context pointer if the CPU still
             * exists.
             */
                mutex_enter(&cp->cpu_cpc_ctxlock);
                mutex_exit(&cp->cpu_cpc_ctxlock);

            mutex_exit(&cpu_lock);
            ASSERT(curthread->t_cpc_ctx == NULL);

            /*
             * Thread-bound context; stop _this_ CPU's counters.
             */
            save_spl = spl_xcall();
            kcpc_unprogram(ctx, B_TRUE);
            curthread->t_cpc_ctx = NULL;

        /*
         * Since we are being called from an exec and we know that
         * exec is not permitted via the agent thread, we should clean
         * up this thread's CPC state completely, and not leave dangling
         * CPC pointers behind.
         */
        ASSERT(ctx->kc_thread == curthread);
        curthread->t_cpc_set = NULL;

    /*
     * Walk through each request in this context's set and free the PCBE's
     * configuration if it exists.
     */
    for (i = 0; i < set->ks_nreqs; i++) {
        if (set->ks_req[i].kr_config != NULL)
            pcbe_ops->pcbe_free(set->ks_req[i].kr_config);

    kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
/*
 * Free the memory associated with a request set.
 */
kcpc_free_set(kcpc_set_t *set)

    kcpc_request_t *req;

    ASSERT(set->ks_req != NULL);

    for (i = 0; i < set->ks_nreqs; i++) {
        req = &set->ks_req[i];

        if (req->kr_nattrs != 0) {
            kmem_free(req->kr_attr,
                req->kr_nattrs * sizeof (kcpc_attr_t));

    kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
    cv_destroy(&set->ks_condv);
    mutex_destroy(&set->ks_lock);
    kmem_free(set, sizeof (kcpc_set_t));
/*
 * Grab every existing context and mark it as invalid.
 */
kcpc_invalidate_all(void)

    for (hash = 0; hash < CPC_HASH_BUCKETS; hash++) {
        mutex_enter(&kcpc_ctx_llock[hash]);
        for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next)
            KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
        mutex_exit(&kcpc_ctx_llock[hash]);
/*
 * Interface for PCBEs to signal that an existing configuration has suddenly
 * become invalid.
 */
kcpc_invalidate_config(void *token)

    kcpc_ctx_t *ctx = token;

    ASSERT(ctx != NULL);

    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
/*
 * Called from lwp_exit() and thread_exit()
 */
kcpc_passivate(void)

    kcpc_ctx_t *ctx = curthread->t_cpc_ctx;
    kcpc_set_t *set = curthread->t_cpc_set;

        /*
         * This thread has a set but no context; it must be a CPU-bound
         * set. The hardware will be stopped via kcpc_unbind() when the
         * process exits and closes its file descriptors with
         * kcpc_close(). Our only job here is to clean up this thread's
         * state; the set will be freed with the unbind().
         */
        (void) kcpc_unbind(set);
        /*
         * Unbinding a set belonging to the current thread should clear
         * its set pointer.
         */
        ASSERT(curthread->t_cpc_set == NULL);

    save_spl = spl_xcall();
    curthread->t_cpc_set = NULL;

    /*
     * This thread/LWP is exiting but context switches will continue to
     * happen for a bit as the exit proceeds. Kernel preemption must be
     * disabled here to prevent a race between checking or setting the
     * INVALID_STOPPED flag here and kcpc_restore() setting the flag during
     * a context switch.
     */
    if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
        kcpc_unprogram(ctx, B_TRUE);
        KCPC_CTX_FLAG_SET(ctx,
            KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);

    /*
     * We're cleaning up after this thread; ensure there are no dangling
     * CPC pointers left behind. The context and set will be freed by
     * freectx().
     */
    curthread->t_cpc_ctx = NULL;
/*
 * Assign the requests in the given set to the PICs in the context.
 * Returns 0 if successful, -1 on failure.
 */
kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx)

    ASSERT(set->ks_nreqs <= cpc_ncounters);

    /*
     * Provide kcpc_tryassign() with scratch space to avoid doing an
     * alloc/free with every invocation.
     */
    picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP);

    /*
     * kcpc_tryassign() blindly walks through each request in the set,
     * seeing if a counter can count its event. If yes, it assigns that
     * counter. However, that counter may have been the only capable counter
     * for _another_ request's event. The solution is to try every possible
     * request first. Note that this does not cover all solutions, as
     * that would require all unique orderings of requests, an n^n operation
     * which would be unacceptable for architectures with many counters.
     */
    for (i = 0; i < set->ks_nreqs; i++)
        if (kcpc_tryassign(set, i, picnum_save) == 0)

    kmem_free(picnum_save, set->ks_nreqs * sizeof (int));
    if (i == set->ks_nreqs)
kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch)

    uint64_t bitmap = 0, resmap = 0;

    /*
     * We are attempting to assign the reqs to pics, but we may fail. If we
     * fail, we need to restore the state of the requests to what it was
     * when we found it, as some reqs may have been explicitly assigned to
     * a specific PIC beforehand. We do this by snapshotting the assignments
     * now and restoring from it later if we fail.
     *
     * Also we note here which counters have already been claimed by
     * requests with explicit counter assignments.
     */
    for (i = 0; i < set->ks_nreqs; i++) {
        scratch[i] = set->ks_req[i].kr_picnum;
        if (set->ks_req[i].kr_picnum != -1)
            resmap |= (1 << set->ks_req[i].kr_picnum);

    /*
     * Walk through requests assigning them to the first PIC that is
     * capable.
     */
        if (set->ks_req[i].kr_picnum != -1) {
            ASSERT((bitmap & (1 << set->ks_req[i].kr_picnum)) == 0);
            bitmap |= (1 << set->ks_req[i].kr_picnum);
            if (++i == set->ks_nreqs)

        ctrmap = pcbe_ops->pcbe_event_coverage(set->ks_req[i].kr_event);
        for (j = 0; j < cpc_ncounters; j++) {
            if (ctrmap & (1 << j) && (bitmap & (1 << j)) == 0 &&
                (resmap & (1 << j)) == 0) {
                /*
                 * We can assign this counter because:
                 *
                 * 1. It can count the event (ctrmap)
                 * 2. It hasn't been assigned yet (bitmap)
                 * 3. It wasn't reserved by a request (resmap)
                 */

        if (j == cpc_ncounters) {
            for (i = 0; i < set->ks_nreqs; i++)
                set->ks_req[i].kr_picnum = scratch[i];

        set->ks_req[i].kr_picnum = j;

        if (++i == set->ks_nreqs)
    } while (i != starting_req);
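/*
 * Editorial example (added, not from the original): why kcpc_assign_reqs()
 * retries kcpc_tryassign() with every possible starting request. Suppose
 * pic0 can count events A and B while pic1 can count only A, and the set
 * asks for one A and one B. A greedy walk that starts with the A request
 * gives A the first capable counter, pic0, and then finds no free counter
 * for B; starting the walk at the B request instead assigns B to pic0 and
 * then A to pic1, which succeeds. Rotating the starting point covers such
 * cases without trying every ordering of requests.
 */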
kcpc_dup_set(kcpc_set_t *set)

    new = kmem_zalloc(sizeof (*new), KM_SLEEP);
    new->ks_state &= ~KCPC_SET_BOUND;
    new->ks_flags = set->ks_flags;
    new->ks_nreqs = set->ks_nreqs;
    new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t),
    new->ks_data = NULL;

    for (i = 0; i < new->ks_nreqs; i++) {
        new->ks_req[i].kr_config = NULL;
        new->ks_req[i].kr_index = set->ks_req[i].kr_index;
        new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum;
        new->ks_req[i].kr_picp = NULL;
        new->ks_req[i].kr_data = NULL;
        (void) strncpy(new->ks_req[i].kr_event, set->ks_req[i].kr_event,
        new->ks_req[i].kr_preset = set->ks_req[i].kr_preset;
        new->ks_req[i].kr_flags = set->ks_req[i].kr_flags;
        new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs;
        new->ks_req[i].kr_attr = kmem_alloc(new->ks_req[i].kr_nattrs *
            sizeof (kcpc_attr_t), KM_SLEEP);
        for (j = 0; j < new->ks_req[i].kr_nattrs; j++) {
            new->ks_req[i].kr_attr[j].ka_val =
                set->ks_req[i].kr_attr[j].ka_val;
            (void) strncpy(new->ks_req[i].kr_attr[j].ka_name,
                set->ks_req[i].kr_attr[j].ka_name,
kcpc_allow_nonpriv(void *token)

    return (((kcpc_ctx_t *)token)->kc_flags & KCPC_CTX_NONPRIV);

kcpc_invalidate(kthread_t *t)

    kcpc_ctx_t *ctx = t->t_cpc_ctx;

    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
/*
 * Given a PCBE ID, attempt to load a matching PCBE module. The strings given
 * are used to construct PCBE names, starting with the most specific,
 * "pcbe.first.second.third.fourth" and ending with the least specific,
 * "pcbe.first".
 *
 * Returns 0 if a PCBE was successfully loaded and -1 upon error.
 */
kcpc_pcbe_tryload(const char *prefix, uint_t first, uint_t second, uint_t third)

    return (modload_qualified("pcbe",
        "pcbe", prefix, ".", s, 3, NULL) < 0 ? -1 : 0);
/*
 * Create one or more CPC contexts for given CPU with specified counter event
 * requests.
 *
 * If the number of requested counter events is less than or equal to the
 * number of hardware counters on a CPU and can all be assigned to the counters
 * on a CPU at the same time, then make one CPC context.
 *
 * Otherwise, multiple CPC contexts are created to allow multiplexing more
 * counter events than existing counters onto the counters by iterating through
 * all of the CPC contexts, programming the counters with each CPC context one
 * at a time and measuring the resulting counter values. Each of the resulting
 * CPC contexts contains some number of requested counter events less than or
 * equal to the number of counters on a CPU depending on whether all the counter
 * events can be programmed on all the counters at the same time or not.
 *
 * Flags to kmem_{,z}alloc() are passed in as an argument to allow specifying
 * whether memory allocation should be non-blocking or not. The code will try
 * to allocate *whole* CPC contexts if possible. If there is any memory
 * allocation failure during the allocations needed for a given CPC context, it
 * will skip allocating that CPC context because it cannot allocate the whole
 * thing. Thus, the only time that it will end up allocating none (ie. no CPC
 * contexts whatsoever) is when it cannot even allocate *one* whole CPC context
 * without a memory allocation failure occurring.
 */
kcpc_cpu_ctx_create(cpu_t *cp, kcpc_request_list_t *req_list, int kmem_flags,
    kcpc_ctx_t ***ctx_ptr_array, size_t *ctx_ptr_array_sz)

    kcpc_ctx_t **ctx_ptrs;
    kcpc_request_t *reqs;

    if (cp == NULL || ctx_ptr_array == NULL || ctx_ptr_array_sz == NULL ||
        req_list == NULL || req_list->krl_cnt < 1)

    /*
     * Allocate number of sets assuming that each set contains one and only
     * one counter event request for each counter on a CPU
     */
    nreqs = req_list->krl_cnt;
    nctx_ptrs = (nreqs + cpc_ncounters - 1) / cpc_ncounters;
    ctx_ptrs = kmem_zalloc(nctx_ptrs * sizeof (kcpc_ctx_t *), kmem_flags);
    if (ctx_ptrs == NULL)

    /*
     * Fill in sets of requests
     */
    reqs = req_list->krl_list;

        /*
         * Allocate CPC context and set for requested counter events
         */
        ctx = kcpc_ctx_alloc(kmem_flags);
        set = kcpc_set_create(reqs, nreqs, 0, kmem_flags);

        /*
         * Determine assignment of requested counter events to specific
         * counters.
         */
        if (kcpc_assign_reqs(set, ctx) != 0) {
            /*
             * May not be able to assign requested counter events
             * to all counters since all counters may not be able
             * to do all events, so only do one counter event in
             * set of counter requests when this happens since at
             * least one of the counters must be able to do the
             * event.
             */
            set = kcpc_set_create(reqs, 1, 0, kmem_flags);

            if (kcpc_assign_reqs(set, ctx) != 0) {
                cmn_err(CE_NOTE, "!kcpc_cpu_ctx_create: can't "
                    "assign counter event %s!\n",
                    set->ks_req->kr_event);

        /*
         * Allocate memory needed to hold requested counter event data
         */
        set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t),
        if (set->ks_data == NULL) {

        /*
         * Configure requested counter events
         */
        if (kcpc_configure_reqs(ctx, set, &subcode) != 0) {
            cmn_err(CE_NOTE,
                "!kcpc_cpu_ctx_create: can't configure "
                "set of counter event requests!\n");
            reqs += set->ks_nreqs;
            nreqs -= set->ks_nreqs;
            kmem_free(set->ks_data,
                set->ks_nreqs * sizeof (uint64_t));

        /*
         * Point set of counter event requests at this context and fill
         * in the context.
         */
        ctx->kc_cpuid = cp->cpu_id;
        ctx->kc_thread = curthread;

        ctx_ptrs[nctx] = ctx;

        /*
         * Update requests and how many are left to be assigned to sets
         */
        reqs += set->ks_nreqs;
        nreqs -= set->ks_nreqs;

        /*
         * Increment number of CPC contexts and allocate bigger array
         * for context pointers as needed
         */
        if (nctx >= nctx_ptrs) {
            /*
             * Allocate more CPC contexts based on how many
             * contexts allocated so far and how many counter
             * requests left to assign
             */
            new_cnt = nctx_ptrs +
                ((nreqs + cpc_ncounters - 1) / cpc_ncounters);
            new = kmem_zalloc(new_cnt * sizeof (kcpc_ctx_t *),

            /*
             * Copy contents of old sets into new ones
             */
            bcopy(ctx_ptrs, new,
                nctx_ptrs * sizeof (kcpc_ctx_t *));

            /*
             * Free old array of context pointers and use newly
             * allocated one instead now
             */
            kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *));
            nctx_ptrs = new_cnt;

    /*
     * Return NULL if no CPC contexts filled in
     */
        kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *));
        *ctx_ptr_array = NULL;
        *ctx_ptr_array_sz = 0;

    *ctx_ptr_array = ctx_ptrs;
    *ctx_ptr_array_sz = nctx_ptrs * sizeof (kcpc_ctx_t *);
/*
 * Return whether PCBE supports given counter event
 */
kcpc_event_supported(char *event)

    if (pcbe_ops == NULL || pcbe_ops->pcbe_event_coverage(event) == 0)
/*
 * Program counters on current CPU with given CPC context
 *
 * If kernel is interposing on counters to measure hardware capacity and
 * utilization, then unprogram counters for kernel *before* programming them
 * with specified CPC context.
 *
 * kcpc_{program,unprogram}() may be called either directly by a thread running
 * on the target CPU or from a cross-call from another CPU. To protect
 * programming and unprogramming from being interrupted by cross-calls, callers
 * who execute kcpc_{program,unprogram} should raise PIL to the level used by
 * cross-calls.
 */
kcpc_program(kcpc_ctx_t *ctx, boolean_t for_thread, boolean_t cu_interpose)

    /*
     * CPC context shouldn't be NULL, its CPU field should specify current
     * CPU or be -1 to specify any CPU when the context is bound to a
     * thread, and preemption should be disabled
     */
    ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id ||
        ctx->kc_cpuid == -1) && curthread->t_preempt > 0);
    if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id &&
        ctx->kc_cpuid != -1) || curthread->t_preempt < 1)

    /*
     * Unprogram counters for kernel measuring hardware capacity and
     * utilization.
     */
    if (cu_interpose == B_TRUE) {
        cu_cpc_unprogram(CPU, &error);

        kcpc_set_t *set = ctx->kc_set;

        ASSERT(set != NULL);

        /*
         * Since cu_interpose is false, we are programming CU context.
         * In general, PCBE can continue from the state saved in the
         * set, but it is not very reliable, so we start again from the
         * preset value.
         */
        for (i = 0; i < set->ks_nreqs; i++) {
            /*
             * Reset the virtual counter value to the preset value.
             */
            *(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;

            /*
             * Reset PCBE to the preset value.
             */
            pcbe_ops->pcbe_configure(0, NULL,
                set->ks_req[i].kr_preset,
                0, 0, NULL, &set->ks_req[i].kr_config, NULL);

    /*
     * Program counters with specified CPC context
     */
    ctx->kc_rawtick = KCPC_GET_TICK();
    pcbe_ops->pcbe_program(ctx);

    /*
     * Denote that counters programmed for thread or CPU CPC context
     */
    if (for_thread == B_TRUE)
        KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);

    CPU->cpu_cpc_ctx = ctx;
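/*
 * Editorial sketch (added): the callers visible in this file bracket
 * kcpc_program()/kcpc_unprogram() with a PIL raise so that a cross-call
 * cannot interleave with the hardware programming, roughly:
 *
 *	save_spl = spl_xcall();		raise to the cross-call level
 *	kcpc_program(ctx, B_TRUE, B_TRUE);
 *	splx(save_spl);			assumed counterpart; the restore
 *					calls are not part of this excerpt
 *
 * Preemption is also expected to be disabled (the ASSERT above checks
 * curthread->t_preempt), which the fragments shown here do not include
 * explicitly.
 */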
/*
 * Unprogram counters with given CPC context on current CPU
 *
 * If kernel is interposing on counters to measure hardware capacity and
 * utilization, then program counters for the kernel capacity and utilization
 * *after* unprogramming them for given CPC context.
 *
 * See the comment for kcpc_program regarding the synchronization with
 * cross-calls.
 */
kcpc_unprogram(kcpc_ctx_t *ctx, boolean_t cu_interpose)

    /*
     * CPC context shouldn't be NULL, its CPU field should specify current
     * CPU or be -1 to specify any CPU when the context is bound to a
     * thread, and preemption should be disabled
     */
    ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id ||
        ctx->kc_cpuid == -1) && curthread->t_preempt > 0);

    if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id &&
        ctx->kc_cpuid != -1) || curthread->t_preempt < 1 ||
        (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) != 0) {

    /*
     * Specified CPC context to be unprogrammed should be bound to current
     * CPU or thread.
     */
    ASSERT(CPU->cpu_cpc_ctx == ctx || curthread->t_cpc_ctx == ctx);

    pcbe_ops->pcbe_allstop();
    KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED);

    /*
     * Allow kernel to interpose on counters and program them for its own
     * use to measure hardware capacity and utilization if cu_interpose
     * is true.
     */
    if (cu_interpose == B_TRUE)
        cu_cpc_program(CPU, &error);
/*
 * Read CPU Performance Counter (CPC) on current CPU and call specified update
 * routine with data for each counter event currently programmed on CPU
 */
kcpc_read(kcpc_update_func_t update_func)

    kcpc_request_t *req;

    /*
     * Can't grab locks or block because may be called inside dispatcher
     */
    ctx = CPU->cpu_cpc_ctx;

    /*
     * Read counter data from current CPU
     */
    pcbe_ops->pcbe_sample(ctx);

    if (set == NULL || set->ks_req == NULL) {

    /*
     * Call update function with preset pointer and data for each CPC event
     * request currently programmed on current CPU
     */
    for (i = 0; i < set->ks_nreqs; i++) {
        if (req[i].kr_data == NULL)

        ret = update_func(req[i].kr_ptr, *req[i].kr_data);
/*
 * Initialize list of counter event requests
 */
kcpc_request_list_t *
kcpc_reqs_init(int nreqs, int kmem_flags)

    kcpc_request_list_t *req_list;
    kcpc_request_t *reqs;

    req_list = kmem_zalloc(sizeof (kcpc_request_list_t), kmem_flags);
    if (req_list == NULL)

    reqs = kmem_zalloc(nreqs * sizeof (kcpc_request_t), kmem_flags);
        kmem_free(req_list, sizeof (kcpc_request_list_t));

    req_list->krl_list = reqs;
    req_list->krl_cnt = 0;
    req_list->krl_max = nreqs;
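/*
 * Editorial sketch (added, not from the original): typical use of the
 * request-list interface by a kernel consumer such as the capacity and
 * utilization code. The event name below is a placeholder, not a real
 * counter event:
 *
 *	kcpc_request_list_t *rl;
 *	kcpc_ctx_t **ctxs;
 *	size_t sz;
 *
 *	rl = kcpc_reqs_init(nevents, KM_SLEEP);
 *	(void) kcpc_reqs_add(rl, "example_event", 0, CPC_COUNT_SYSTEM,
 *	    0, NULL, callback_arg, KM_SLEEP);
 *	...
 *	(void) kcpc_cpu_ctx_create(cp, rl, KM_SLEEP, &ctxs, &sz);
 *	...
 *	kcpc_reqs_fini(rl);
 *
 * Error handling and the eventual kcpc_cpu_program()/kcpc_cpu_stop() calls
 * are omitted here.
 */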
/*
 * Add counter event request to given list of counter event requests
 */
kcpc_reqs_add(kcpc_request_list_t *req_list, char *event, uint64_t preset,
    uint_t flags, uint_t nattrs, kcpc_attr_t *attr, void *ptr, int kmem_flags)

    kcpc_request_t *req;

    if (req_list == NULL || req_list->krl_list == NULL)

    ASSERT(req_list->krl_max != 0);

    /*
     * Allocate more space (if needed)
     */
    if (req_list->krl_cnt > req_list->krl_max) {
        kcpc_request_t *new;
        kcpc_request_t *old;

        old = req_list->krl_list;
        new = kmem_zalloc((req_list->krl_max +
            cpc_ncounters) * sizeof (kcpc_request_t), kmem_flags);

        req_list->krl_list = new;
        bcopy(old, req_list->krl_list,
            req_list->krl_cnt * sizeof (kcpc_request_t));
        kmem_free(old, req_list->krl_max * sizeof (kcpc_request_t));
        req_list->krl_cnt = 0;
        req_list->krl_max += cpc_ncounters;

    /*
     * Fill in request as much as possible now, but some fields will need
     * to be set when request is assigned to a set.
     */
    req = &req_list->krl_list[req_list->krl_cnt];
    req->kr_config = NULL;
    req->kr_picnum = -1;   /* have CPC pick this */
    req->kr_index = -1;    /* set when assigning request to set */
    req->kr_data = NULL;   /* set when configuring request */
    (void) strcpy(req->kr_event, event);
    req->kr_preset = preset;
    req->kr_flags = flags;
    req->kr_nattrs = nattrs;
    req->kr_attr = attr;
    /*
     * Keep pointer given by caller to give to update function when this
     * counter event is sampled/read
     */

    req_list->krl_cnt++;
/*
 * Reset list of CPC event requests so its space can be used for another set
 */
kcpc_reqs_reset(kcpc_request_list_t *req_list)

    /*
     * Return when pointer to request list structure or request is NULL or
     * when max requests is less than or equal to 0
     */
    if (req_list == NULL || req_list->krl_list == NULL ||
        req_list->krl_max <= 0)

    /*
     * Zero out requests and number of requests used
     */
    bzero(req_list->krl_list, req_list->krl_max * sizeof (kcpc_request_t));
    req_list->krl_cnt = 0;
/*
 * Free given list of counter event requests
 */
kcpc_reqs_fini(kcpc_request_list_t *req_list)

    kmem_free(req_list->krl_list,
        req_list->krl_max * sizeof (kcpc_request_t));
    kmem_free(req_list, sizeof (kcpc_request_list_t));
/*
 * Create set of given counter event requests
 */
kcpc_set_create(kcpc_request_t *reqs, int nreqs, int set_flags, int kmem_flags)

    /*
     * Allocate set and assign number of requests in set and flags
     */
    set = kmem_zalloc(sizeof (kcpc_set_t), kmem_flags);

    if (nreqs < cpc_ncounters)
        set->ks_nreqs = nreqs;

        set->ks_nreqs = cpc_ncounters;

    set->ks_flags = set_flags;

    /*
     * Allocate requests needed, copy requests into set, and set index into
     * data for each request (which may change when we assign requested
     * counter events to counters)
     */
    set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) *
        set->ks_nreqs, kmem_flags);
    if (set->ks_req == NULL) {
        kmem_free(set, sizeof (kcpc_set_t));

    bcopy(reqs, set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);

    for (i = 0; i < set->ks_nreqs; i++)
        set->ks_req[i].kr_index = i;
/*
 * Stop counters on current CPU.
 *
 * If preserve_context is true, the caller is interested in the CPU's CPC
 * context and wants it to be preserved.
 *
 * If preserve_context is false, the caller does not need the CPU's CPC context
 * to be preserved, so it is set to NULL.
 */
kcpc_cpustop_func(boolean_t preserve_context)

    /*
     * Someone already stopped this context before us, so there is nothing
     * to do here.
     */
    if (CPU->cpu_cpc_ctx == NULL) {

    kcpc_unprogram(CPU->cpu_cpc_ctx, B_TRUE);
    /*
     * If CU does not use counters, then clear the CPU's CPC context
     * If the caller requested to preserve context it should disable CU
     * first, so there should be no CU context now.
     */
    ASSERT(!preserve_context || !CU_CPC_ON(CPU));
    if (!preserve_context && CPU->cpu_cpc_ctx != NULL && !CU_CPC_ON(CPU))
        CPU->cpu_cpc_ctx = NULL;

/*
 * Stop counters on given CPU and set its CPC context to NULL unless
 * preserve_context is true.
 */
kcpc_cpu_stop(cpu_t *cp, boolean_t preserve_context)

    cpu_call(cp, (cpu_call_func_t)kcpc_cpustop_func,
        preserve_context, 0);
/*
 * Program the context on the current CPU
 */
kcpc_remoteprogram_func(kcpc_ctx_t *ctx, uintptr_t arg)

    boolean_t for_thread = (boolean_t)arg;

    ASSERT(ctx != NULL);

    kcpc_program(ctx, for_thread, B_TRUE);

/*
 * Program counters on given CPU
 */
kcpc_cpu_program(cpu_t *cp, kcpc_ctx_t *ctx)

    cpu_call(cp, (cpu_call_func_t)kcpc_remoteprogram_func, (uintptr_t)ctx,
        (uintptr_t)B_FALSE);

kcpc_list_attrs(void)

    ASSERT(pcbe_ops != NULL);

    return (pcbe_ops->pcbe_list_attrs());

kcpc_list_events(uint_t pic)

    ASSERT(pcbe_ops != NULL);

    return (pcbe_ops->pcbe_list_events(pic));

kcpc_pcbe_capabilities(void)

    ASSERT(pcbe_ops != NULL);

    return (pcbe_ops->pcbe_caps);

kcpc_pcbe_loaded(void)

    return (pcbe_ops == NULL ? -1 : 0);