/*-
 * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */
/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
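/*
 * For illustration, a userspace consumer of this api might pin the current
 * thread and then read back the cpus available to it.  This is a minimal
 * sketch, not part of this file; error handling is abbreviated and the
 * usual userspace headers (<sys/cpuset.h>, <err.h>) are assumed:
 *
 *	cpuset_t m;
 *
 *	CPU_ZERO(&m);
 *	CPU_SET(0, &m);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(m), &m) != 0)
 *		err(1, "cpuset_setaffinity");
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(m), &m) != 0)
 *		err(1, "cpuset_getaffinity");
 */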
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;
/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}
/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}
/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}
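/*
 * Callers pair these lookups with a release; e.g. cpuset_getid() below
 * holds a base reference only long enough to read the id (sketch):
 *
 *	set = cpuset_refbase(td->td_cpuset);
 *	id = set->cs_id;
 *	cpuset_rel(set);
 */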
/*
 * Release a reference in a context where it is safe to allocate.
 */
static void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}
/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
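/*
 * The two halves pair up as in cpuset_setproc() below: releases are
 * deferred while thread locks are held and completed once all locks are
 * dropped.  A sketch of the pattern (names illustrative):
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	...
 *	cpuset_rel_defer(&droplist, tdset);	(thread lock held)
 *	...
 *	while ((nset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(nset);	(no locks held)
 */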
/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);
	return (set);
}
/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}
/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);
}
/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}
/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
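/*
 * The syscall handlers below resolve their id arguments through this
 * helper; e.g. looking up the current process (sketch):
 *
 *	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set);
 *
 * On success for WHICH_PID/WHICH_TID, 'p' is returned locked and the
 * caller is responsible for PROC_UNLOCK(p).
 */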
/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}
/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}
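/*
 * The two cases above correspond to the two callers below: the cpuset()
 * syscall installs a new base set for the whole process, while
 * cpuset_setaffinity() applies a plain mask (sketch):
 *
 *	error = cpuset_setproc(-1, set, NULL);		(case 1)
 *	error = cpuset_setproc(uap->id, NULL, mask);	(case 2)
 */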
/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}
/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and mark the root readonly.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);
#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);

	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);

	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
		break;
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
			break;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
			break;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
			error = cpuset_which(CPU_WHICH_CPUSET, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
	}
}
#endif /* DDB */