4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * Copyright 2019 Joyent, Inc.
32 #include <sys/pool_impl.h>
33 #include <sys/pool_pset.h>
34 #include <sys/cpuvar.h>
35 #include <sys/cpupart.h>
36 #include <sys/mutex.h>
37 #include <sys/errno.h>
38 #include <sys/systm.h>
39 #include <sys/cmn_err.h>
41 #include <sys/exacct.h>
43 #include <sys/policy.h>
44 #include <sys/class.h>
50 * Processor set plugin for pools.
52 * This file contains various routines used by the common pools layer to create,
53 * modify, and destroy processor sets. All processor sets created by this
54 * plug-in are stored in the pool_pset_list doubly-linked list, which is
55 * guaranteed to always have an entry for the default processor set,
58 * Interaction with zones:
60 * If pools are enabled, non-global zones only have visibility into the
61 * pset of the pool to which they are bound. This is accomplished by
62 * changing the set of processors and processor sets which are visible
63 * through both systemcall interfaces and system kstats.
65 * To avoid grabbing pool_lock() during cpu change operations, we cache
66 * the pset the zone is currently bound to, and can read this value
67 * while under cpu_lock. The special psetid_t token ZONE_PS_INVAL means
 * that pools are disabled, and provides a mechanism for determining the
 * status of pools without grabbing pool_lock().
71 * To avoid grabbing any locks to determine the instantaneous value of
72 * the number of configured and online cpus in the zone, we also cache
73 * these values in a zone_t. If these values are zero, the pools
74 * facility must be disabled, in which case relevant systemcall
75 * interfaces will return the values for the system as a whole.
77 * The various kstat interfaces are dealt with as follows: if pools are
78 * disabled all cpu-related kstats should be exported to all zones.
79 * When pools are enabled we begin maintaining a list of "permitted
80 * zones" on a per-kstat basis. There are various hooks throughout the
81 * code to update this list when certain pools- or cpu-related events
/*
 * Global pset bookkeeping. pool_pset_list always contains at least the
 * default pset; the modification timestamps are refreshed via gethrtime()
 * whenever the pset/CPU configuration changes.
 */
static list_t pool_pset_list;	/* doubly-linked list of psets */
pool_pset_t *pool_pset_default;	/* default pset */
hrtime_t pool_pset_mod;		/* last modification time for psets */
hrtime_t pool_cpu_mod;		/* last modification time for CPUs */
91 pool_lookup_pset_by_id(psetid_t psetid
)
93 pool_pset_t
*pset
= pool_pset_default
;
95 ASSERT(pool_lock_held());
97 for (pset
= list_head(&pool_pset_list
); pset
;
98 pset
= list_next(&pool_pset_list
, pset
)) {
99 if (pset
->pset_id
== psetid
)
/*
 * Callback function used to apply a cpu configuration event to a zone.
 */
153 * Callback function to be executed when a noteworthy cpu event takes
154 * place. Will ensure that the event is reflected by the zones which
155 * were affected by it.
159 pool_pset_cpu_setup(cpu_setup_t what
, int id
, void *arg
)
161 processorid_t cpuid
= id
;
162 struct setup_arg sarg
;
166 ASSERT(MUTEX_HELD(&cpu_lock
));
167 ASSERT(INGLOBALZONE(curproc
));
169 if (!pool_pset_enabled())
171 if (what
!= CPU_CONFIG
&& what
!= CPU_UNCONFIG
&&
172 what
!= CPU_ON
&& what
!= CPU_OFF
&&
173 what
!= CPU_CPUPART_IN
&& what
!= CPU_CPUPART_OUT
)
177 sarg
.psetid
= cpupart_query_cpu(c
);
181 error
= zone_walk(pool_pset_setup_cb
, &sarg
);
187 * Initialize processor set plugin. Called once at boot time.
192 ASSERT(pool_pset_default
== NULL
);
193 pool_pset_default
= kmem_zalloc(sizeof (pool_pset_t
), KM_SLEEP
);
194 pool_pset_default
->pset_id
= PS_NONE
;
195 pool_pset_default
->pset_npools
= 1; /* for pool_default */
196 pool_default
->pool_pset
= pool_pset_default
;
197 list_create(&pool_pset_list
, sizeof (pool_pset_t
),
198 offsetof(pool_pset_t
, pset_link
));
199 list_insert_head(&pool_pset_list
, pool_pset_default
);
200 mutex_enter(&cpu_lock
);
201 register_cpu_setup_func(pool_pset_cpu_setup
, NULL
);
202 mutex_exit(&cpu_lock
);
206 * Dummy wrapper function that returns 0 to satisfy zone_walk().
209 pool_pset_zone_pset_set(zone_t
*zone
, void *arg
)
211 psetid_t psetid
= (psetid_t
)(uintptr_t)arg
;
213 ASSERT(MUTEX_HELD(&cpu_lock
));
214 zone_pset_set(zone
, psetid
);
219 * Enable processor set plugin.
222 pool_pset_enable(void)
227 ASSERT(pool_lock_held());
228 ASSERT(INGLOBALZONE(curproc
));
230 * Can't enable pools if there are existing cpu partitions.
232 mutex_enter(&cpu_lock
);
233 if (cp_numparts
> 1) {
234 mutex_exit(&cpu_lock
);
239 * We want to switch things such that everything that was tagged with
240 * the special ALL_ZONES token now is explicitly visible to all zones:
241 * first add individual zones to the visibility list then remove the
242 * special "ALL_ZONES" token. There must only be the default pset
243 * (PS_NONE) active if pools are being enabled, so we only need to
246 * We want to make pool_pset_enabled() start returning B_TRUE before
247 * we call any of the visibility update functions.
249 global_zone
->zone_psetid
= PS_NONE
;
251 * We need to explicitly handle the global zone since
252 * zone_pset_set() won't modify it.
254 pool_pset_visibility_add(PS_NONE
, global_zone
);
256 * A NULL argument means the ALL_ZONES token.
258 pool_pset_visibility_remove(PS_NONE
, NULL
);
259 error
= zone_walk(pool_pset_zone_pset_set
, (void *)PS_NONE
);
263 * It is safe to drop cpu_lock here. We're still
264 * holding pool_lock so no new cpu partitions can
265 * be created while we're here.
267 mutex_exit(&cpu_lock
);
268 (void) nvlist_alloc(&pool_pset_default
->pset_props
,
269 NV_UNIQUE_NAME
, KM_SLEEP
);
270 props
= pool_pset_default
->pset_props
;
271 (void) nvlist_add_string(props
, "pset.name", "pset_default");
272 (void) nvlist_add_string(props
, "pset.comment", "");
273 (void) nvlist_add_int64(props
, "pset.sys_id", PS_NONE
);
274 (void) nvlist_add_string(props
, "pset.units", "population");
275 (void) nvlist_add_byte(props
, "pset.default", 1);
276 (void) nvlist_add_uint64(props
, "pset.max", 65536);
277 (void) nvlist_add_uint64(props
, "pset.min", 1);
278 pool_pset_mod
= pool_cpu_mod
= gethrtime();
283 * Disable processor set plugin.
286 pool_pset_disable(void)
292 ASSERT(pool_lock_held());
293 ASSERT(INGLOBALZONE(curproc
));
295 mutex_enter(&cpu_lock
);
296 if (cp_numparts
> 1) { /* make sure only default pset is left */
297 mutex_exit(&cpu_lock
);
301 * Remove all non-system CPU and processor set properties
303 for (cpuid
= 0; cpuid
< NCPU
; cpuid
++) {
304 if ((cpu
= cpu_get(cpuid
)) == NULL
)
306 if (cpu
->cpu_props
!= NULL
) {
307 (void) nvlist_free(cpu
->cpu_props
);
308 cpu
->cpu_props
= NULL
;
313 * We want to switch things such that everything is now visible
314 * to ALL_ZONES: first add the special "ALL_ZONES" token to the
315 * visibility list then remove individual zones. There must
316 * only be the default pset active if pools are being disabled,
317 * so we only need to deal with it.
319 error
= zone_walk(pool_pset_zone_pset_set
, (void *)ZONE_PS_INVAL
);
321 pool_pset_visibility_add(PS_NONE
, NULL
);
322 pool_pset_visibility_remove(PS_NONE
, global_zone
);
324 * pool_pset_enabled() will henceforth return B_FALSE.
326 global_zone
->zone_psetid
= ZONE_PS_INVAL
;
327 mutex_exit(&cpu_lock
);
328 if (pool_pset_default
->pset_props
!= NULL
) {
329 nvlist_free(pool_pset_default
->pset_props
);
330 pool_pset_default
->pset_props
= NULL
;
336 * Create new processor set and give it a temporary name.
339 pool_pset_create(psetid_t
*id
)
346 ASSERT(pool_lock_held());
347 if ((err
= cpupart_create(&psetid
)) != 0)
349 pset
= kmem_alloc(sizeof (pool_pset_t
), KM_SLEEP
);
350 pset
->pset_id
= *id
= psetid
;
351 pset
->pset_npools
= 0;
352 (void) nvlist_alloc(&pset
->pset_props
, NV_UNIQUE_NAME
, KM_SLEEP
);
353 (void) nvlist_add_int64(pset
->pset_props
, "pset.sys_id", psetid
);
354 (void) nvlist_add_byte(pset
->pset_props
, "pset.default", 0);
355 pool_pset_mod
= gethrtime();
356 (void) snprintf(pset_name
, sizeof (pset_name
), "pset_%lld",
358 (void) nvlist_add_string(pset
->pset_props
, "pset.name", pset_name
);
359 list_insert_tail(&pool_pset_list
, pset
);
364 * Destroy existing processor set.
367 pool_pset_destroy(psetid_t psetid
)
372 ASSERT(pool_lock_held());
374 if (psetid
== PS_NONE
)
376 if ((pset
= pool_lookup_pset_by_id(psetid
)) == NULL
)
378 if (pset
->pset_npools
> 0) /* can't destroy associated psets */
380 if ((ret
= cpupart_destroy(pset
->pset_id
)) != 0)
382 (void) nvlist_free(pset
->pset_props
);
383 list_remove(&pool_pset_list
, pset
);
384 pool_pset_mod
= gethrtime();
385 kmem_free(pset
, sizeof (pool_pset_t
));
390 * Change the visibility of a pset (and all contained cpus) in a zone.
391 * A NULL zone argument implies the special ALL_ZONES token.
394 pool_pset_visibility_change(psetid_t psetid
, zone_t
*zone
, boolean_t add
)
396 zoneid_t zoneid
= zone
? zone
->zone_id
: ALL_ZONES
;
400 ASSERT(MUTEX_HELD(&cpu_lock
));
401 ASSERT(psetid
!= ZONE_PS_INVAL
);
403 cp
= cpupart_find(psetid
);
405 if (cp
->cp_kstat
!= NULL
) {
407 kstat_zone_add(cp
->cp_kstat
, zoneid
);
409 kstat_zone_remove(cp
->cp_kstat
, zoneid
);
415 if (c
->cpu_part
== cp
&& !cpu_is_poweredoff(c
)) {
417 cpu_visibility_add(c
, zone
);
419 cpu_visibility_remove(c
, zone
);
421 } while ((c
= c
->cpu_next
) != cpu_list
);
425 * Make the processor set visible to the zone. A NULL value for
426 * the zone means that the special ALL_ZONES token should be added to
427 * the visibility list.
430 pool_pset_visibility_add(psetid_t psetid
, zone_t
*zone
)
432 pool_pset_visibility_change(psetid
, zone
, B_TRUE
);
436 * Remove zone's visibility into the processor set. A NULL value for
437 * the zone means that the special ALL_ZONES token should be removed
438 * from the visibility list.
441 pool_pset_visibility_remove(psetid_t psetid
, zone_t
*zone
)
443 pool_pset_visibility_change(psetid
, zone
, B_FALSE
);
447 * Quick way of seeing if pools are enabled (as far as processor sets are
448 * concerned) without holding pool_lock().
451 pool_pset_enabled(void)
453 ASSERT(MUTEX_HELD(&cpu_lock
));
455 return (zone_pset_get(global_zone
) != ZONE_PS_INVAL
);
458 struct assoc_zone_arg
{
464 * Callback function to update a zone's processor set visibility when
465 * a pool is associated with a processor set.
468 pool_pset_assoc_zone_cb(zone_t
*zone
, void *arg
)
470 struct assoc_zone_arg
*aza
= arg
;
472 zoneid_t zoneid
= zone
->zone_id
;
474 ASSERT(pool_lock_held());
475 ASSERT(MUTEX_HELD(&cpu_lock
));
477 if (zoneid
== GLOBAL_ZONEID
)
479 pool
= zone_pool_get(zone
);
480 if (pool
->pool_id
== aza
->poolid
)
481 zone_pset_set(zone
, aza
->newpsetid
);
486 * Associate pool with new processor set.
489 pool_pset_assoc(poolid_t poolid
, psetid_t psetid
)
492 pool_pset_t
*pset
, *oldpset
;
495 ASSERT(pool_lock_held());
497 if ((pool
= pool_lookup_pool_by_id(poolid
)) == NULL
||
498 (pset
= pool_lookup_pset_by_id(psetid
)) == NULL
) {
501 if (pool
->pool_pset
->pset_id
== psetid
) {
503 * Already associated.
509 * Hang the new pset off the pool, and rebind all of the pool's
510 * processes to it. If pool_do_bind fails, all processes will remain
511 * bound to the old set.
513 oldpset
= pool
->pool_pset
;
514 pool
->pool_pset
= pset
;
515 err
= pool_do_bind(pool
, P_POOLID
, poolid
, POOL_BIND_PSET
);
517 pool
->pool_pset
= oldpset
;
519 struct assoc_zone_arg azarg
;
522 * Update zones' visibility to reflect changes.
524 azarg
.poolid
= poolid
;
525 azarg
.newpsetid
= pset
->pset_id
;
526 mutex_enter(&cpu_lock
);
527 err
= zone_walk(pool_pset_assoc_zone_cb
, &azarg
);
529 mutex_exit(&cpu_lock
);
531 oldpset
->pset_npools
--;
538 * Transfer specified CPUs between processor sets.
541 pool_pset_xtransfer(psetid_t src
, psetid_t dst
, size_t size
, id_t
*ids
)
547 ASSERT(pool_lock_held());
548 ASSERT(INGLOBALZONE(curproc
));
550 if (size
== 0 || size
> max_ncpus
) /* quick sanity check */
553 mutex_enter(&cpu_lock
);
554 for (id
= 0; id
< size
; id
++) {
555 if ((cpu
= cpu_get((processorid_t
)ids
[id
])) == NULL
||
556 cpupart_query_cpu(cpu
) != src
) {
560 if ((ret
= cpupart_attach_cpu(dst
, cpu
, 1)) != 0)
563 mutex_exit(&cpu_lock
);
565 pool_pset_mod
= gethrtime();
570 * Bind process to processor set. This should never fail because
571 * we should've done all preliminary checks before calling it.
574 pool_pset_bind(proc_t
*p
, psetid_t psetid
, void *projbuf
, void *zonebuf
)
579 ASSERT(pool_lock_held());
580 ASSERT(MUTEX_HELD(&cpu_lock
));
581 ASSERT(MUTEX_HELD(&pidlock
));
582 ASSERT(MUTEX_HELD(&p
->p_lock
));
584 if ((t
= p
->p_tlist
) == NULL
)
587 ret
= cpupart_bind_thread(t
, psetid
, 0, projbuf
, zonebuf
);
589 t
->t_bind_pset
= psetid
;
590 } while ((t
= t
->t_forw
) != p
->p_tlist
);
594 * See the comment above pool_do_bind() for the semantics of the pset_bind_*()
595 * functions. These must be kept in sync with cpupart_move_thread, and
596 * anything else that could fail a pool_pset_bind.
598 * Returns non-zero errno on failure and zero on success.
599 * Iff successful, cpu_lock is held on return.
602 pset_bind_start(proc_t
**procs
, pool_t
*pool
)
610 extern int cpupart_movable_thread(kthread_id_t
, cpupart_t
*, int);
612 ASSERT(pool_lock_held());
613 ASSERT(INGLOBALZONE(curproc
));
615 mutex_enter(&cpu_lock
);
618 newpp
= cpupart_find(pool
->pool_pset
->pset_id
);
619 ASSERT(newpp
!= NULL
);
620 if (newpp
->cp_cpulist
== NULL
) {
622 mutex_exit(&cpu_lock
);
629 * Check for the PRIV_PROC_PRIOCNTL privilege that is required
630 * to enter and exit scheduling classes. If other privileges
631 * are required by CL_ENTERCLASS/CL_CANEXIT types of routines
632 * in the future, this code will have to be updated.
634 if (secpolicy_setpriority(pcred
) != 0) {
636 mutex_exit(&cpu_lock
);
641 for (pp
= procs
; (p
= *pp
) != NULL
; pp
++) {
642 mutex_enter(&p
->p_lock
);
643 if ((t
= p
->p_tlist
) == NULL
) {
644 mutex_exit(&p
->p_lock
);
648 * Check our basic permissions to control this process.
650 if (!prochasprocperm(p
, curproc
, pcred
)) {
651 mutex_exit(&p
->p_lock
);
653 mutex_exit(&cpu_lock
);
659 * Check that all threads can be moved to
660 * a new processor set.
663 ret
= cpupart_movable_thread(t
, newpp
, 0);
666 mutex_exit(&p
->p_lock
);
668 mutex_exit(&cpu_lock
);
672 } while ((t
= t
->t_forw
) != p
->p_tlist
);
673 mutex_exit(&p
->p_lock
);
676 return (0); /* with cpu_lock held and weakbinding stopped */
681 pset_bind_abort(proc_t
**procs
, pool_t
*pool
)
683 mutex_exit(&cpu_lock
);
687 pset_bind_finish(void)
690 mutex_exit(&cpu_lock
);
693 static pool_property_t pool_pset_props
[] = {
694 { "pset.name", DATA_TYPE_STRING
, PP_RDWR
},
695 { "pset.comment", DATA_TYPE_STRING
, PP_RDWR
},
696 { "pset.sys_id", DATA_TYPE_UINT64
, PP_READ
},
697 { "pset.units", DATA_TYPE_STRING
, PP_RDWR
},
698 { "pset.default", DATA_TYPE_BYTE
, PP_READ
},
699 { "pset.min", DATA_TYPE_UINT64
, PP_RDWR
},
700 { "pset.max", DATA_TYPE_UINT64
, PP_RDWR
},
701 { "pset.size", DATA_TYPE_UINT64
, PP_READ
},
702 { "pset.load", DATA_TYPE_UINT64
, PP_READ
},
703 { "pset.poold.objectives", DATA_TYPE_STRING
,
704 PP_RDWR
| PP_OPTIONAL
},
708 static pool_property_t pool_cpu_props
[] = {
709 { "cpu.sys_id", DATA_TYPE_UINT64
, PP_READ
},
710 { "cpu.comment", DATA_TYPE_STRING
, PP_RDWR
},
711 { "cpu.status", DATA_TYPE_STRING
, PP_RDWR
},
712 { "cpu.pinned", DATA_TYPE_BYTE
,
713 PP_RDWR
| PP_OPTIONAL
},
718 * Put property on the specified processor set.
721 pool_pset_propput(psetid_t psetid
, nvpair_t
*pair
)
726 ASSERT(pool_lock_held());
728 if ((pset
= pool_lookup_pset_by_id(psetid
)) == NULL
)
730 ret
= pool_propput_common(pset
->pset_props
, pair
, pool_pset_props
);
732 pool_pset_mod
= gethrtime();
737 * Remove existing processor set property.
740 pool_pset_proprm(psetid_t psetid
, char *name
)
745 ASSERT(pool_lock_held());
747 if ((pset
= pool_lookup_pset_by_id(psetid
)) == NULL
)
749 ret
= pool_proprm_common(pset
->pset_props
, name
, pool_pset_props
);
751 pool_pset_mod
= gethrtime();
756 * Put new CPU property.
757 * Handle special case of "cpu.status".
760 pool_cpu_propput(processorid_t cpuid
, nvpair_t
*pair
)
765 ASSERT(pool_lock_held());
766 ASSERT(INGLOBALZONE(curproc
));
768 if (nvpair_type(pair
) == DATA_TYPE_STRING
&&
769 strcmp(nvpair_name(pair
), "cpu.status") == 0) {
773 (void) nvpair_value_string(pair
, &val
);
774 if (strcmp(val
, PS_OFFLINE
) == 0)
776 else if (strcmp(val
, PS_ONLINE
) == 0)
778 else if (strcmp(val
, PS_NOINTR
) == 0)
780 else if (strcmp(val
, PS_FAULTED
) == 0)
782 else if (strcmp(val
, PS_SPARE
) == 0)
786 ret
= p_online_internal(cpuid
, status
, &old_status
);
788 mutex_enter(&cpu_lock
);
789 if ((cpu
= cpu_get(cpuid
)) == NULL
)
791 if (cpu
->cpu_props
== NULL
) {
792 (void) nvlist_alloc(&cpu
->cpu_props
,
793 NV_UNIQUE_NAME
, KM_SLEEP
);
794 (void) nvlist_add_string(cpu
->cpu_props
,
797 ret
= pool_propput_common(cpu
->cpu_props
, pair
, pool_cpu_props
);
799 pool_cpu_mod
= gethrtime();
800 mutex_exit(&cpu_lock
);
806 * Remove existing CPU property.
809 pool_cpu_proprm(processorid_t cpuid
, char *name
)
814 ASSERT(pool_lock_held());
815 ASSERT(INGLOBALZONE(curproc
));
817 mutex_enter(&cpu_lock
);
818 if ((cpu
= cpu_get(cpuid
)) == NULL
|| cpu_is_poweredoff(cpu
)) {
821 if (cpu
->cpu_props
== NULL
)
824 ret
= pool_proprm_common(cpu
->cpu_props
, name
,
828 pool_cpu_mod
= gethrtime();
829 mutex_exit(&cpu_lock
);
/*
 * This macro returns load average multiplied by 1000 w/o losing precision.
 * The argument is fully parenthesized so expression arguments (array
 * references, arithmetic, conditionals) bind correctly under the shift
 * and mask operators.
 */
#define	PSET_LOAD(f)	((((f) >> 16) * 1000) + ((((f) & 0xffff) * 1000) / 0xffff))
839 * Take a snapshot of the current state of processor sets and CPUs,
840 * pack it in the exacct format, and attach it to specified exacct record.
843 pool_pset_pack(ea_object_t
*eo_system
)
845 ea_object_t
*eo_pset
, *eo_cpu
;
855 ASSERT(pool_lock_held());
857 mutex_enter(&cpu_lock
);
858 mypsetid
= zone_pset_get(curproc
->p_zone
);
859 for (pset
= list_head(&pool_pset_list
); pset
;
860 pset
= list_next(&pool_pset_list
, pset
)) {
861 psetid_t psetid
= pset
->pset_id
;
863 if (!INGLOBALZONE(curproc
) && mypsetid
!= psetid
)
865 cpupart
= cpupart_find(psetid
);
866 ASSERT(cpupart
!= NULL
);
867 eo_pset
= ea_alloc_group(EXT_GROUP
|
868 EXC_LOCAL
| EXD_GROUP_PSET
);
869 (void) ea_attach_item(eo_pset
, &psetid
, sizeof (id_t
),
870 EXC_LOCAL
| EXD_PSET_PSETID
| EXT_UINT32
);
872 * Pack info for all CPUs in this processor set.
877 if (cpu
->cpu_part
!= cpupart
) /* not our pset */
880 eo_cpu
= ea_alloc_group(EXT_GROUP
881 | EXC_LOCAL
| EXD_GROUP_CPU
);
882 (void) ea_attach_item(eo_cpu
, &cpu
->cpu_id
,
883 sizeof (processorid_t
),
884 EXC_LOCAL
| EXD_CPU_CPUID
| EXT_UINT32
);
885 if (cpu
->cpu_props
== NULL
) {
886 (void) nvlist_alloc(&cpu
->cpu_props
,
887 NV_UNIQUE_NAME
, KM_SLEEP
);
888 (void) nvlist_add_string(cpu
->cpu_props
,
891 (void) nvlist_dup(cpu
->cpu_props
, &nvl
, KM_SLEEP
);
892 (void) nvlist_add_int64(nvl
, "cpu.sys_id", cpu
->cpu_id
);
893 (void) nvlist_add_string(nvl
, "cpu.status",
894 (char *)cpu_get_state_str(cpu
->cpu_flags
));
897 (void) nvlist_pack(nvl
, &buf
, &bufsz
,
898 NV_ENCODE_NATIVE
, 0);
899 (void) ea_attach_item(eo_cpu
, buf
, bufsz
,
900 EXC_LOCAL
| EXD_CPU_PROP
| EXT_RAW
);
901 (void) nvlist_free(nvl
);
902 kmem_free(buf
, bufsz
);
903 (void) ea_attach_to_group(eo_pset
, eo_cpu
);
904 } while ((cpu
= cpu
->cpu_next
) != cpu_list
);
906 (void) nvlist_dup(pset
->pset_props
, &nvl
, KM_SLEEP
);
907 (void) nvlist_add_uint64(nvl
, "pset.size", ncpu
);
908 (void) nvlist_add_uint64(nvl
, "pset.load",
909 (uint64_t)PSET_LOAD(cpupart
->cp_hp_avenrun
[0]));
912 (void) nvlist_pack(nvl
, &buf
, &bufsz
, NV_ENCODE_NATIVE
, 0);
913 (void) ea_attach_item(eo_pset
, buf
, bufsz
,
914 EXC_LOCAL
| EXD_PSET_PROP
| EXT_RAW
);
915 (void) nvlist_free(nvl
);
916 kmem_free(buf
, bufsz
);
918 (void) ea_attach_to_group(eo_system
, eo_pset
);
920 mutex_exit(&cpu_lock
);
925 * Get dynamic property for processor sets.
926 * The only dynamic property currently implemented is "pset.load".
929 pool_pset_propget(psetid_t psetid
, char *name
, nvlist_t
*nvl
)
935 ASSERT(pool_lock_held());
937 mutex_enter(&cpu_lock
);
938 pset
= pool_lookup_pset_by_id(psetid
);
939 cpupart
= cpupart_find(psetid
);
940 if (cpupart
== NULL
|| pset
== NULL
) {
941 mutex_exit(&cpu_lock
);
944 if (strcmp(name
, "pset.load") == 0)
945 ret
= nvlist_add_uint64(nvl
, "pset.load",
946 (uint64_t)PSET_LOAD(cpupart
->cp_hp_avenrun
[0]));
949 mutex_exit(&cpu_lock
);
954 * Get dynamic property for CPUs.
955 * The only dynamic property currently implemented is "cpu.status".
958 pool_cpu_propget(processorid_t cpuid
, char *name
, nvlist_t
*nvl
)
963 ASSERT(pool_lock_held());
965 mutex_enter(&cpu_lock
);
966 if ((cpu
= cpu_get(cpuid
)) == NULL
) {
967 mutex_exit(&cpu_lock
);
970 if (strcmp(name
, "cpu.status") == 0) {
971 ret
= nvlist_add_string(nvl
, "cpu.status",
972 (char *)cpu_get_state_str(cpu
->cpu_flags
));
976 mutex_exit(&cpu_lock
);