/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sergey Glushchenko <deen@smz.com.ua>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cpumask.h>
#include <sys/errno.h>
#include <sys/globaldata.h>		/* curthread */
#include <sys/proc.h>
#include <sys/priv.h>			/* priv_check() */
#include <sys/sysproto.h>		/* struct usched_set_args */
#include <sys/systm.h>			/* strcmp() */
#include <sys/usched.h>

#include <machine/smp.h>
static TAILQ_HEAD(, usched) usched_list = TAILQ_HEAD_INITIALIZER(usched_list);
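/*
 * Mask of cpus for the userland schedulers; initialized with all cpu
 * bits set.
 */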
cpumask_t usched_mastermask = CPUMASK_INITIALIZER_ALLONES;
static int setaffinity_lp(struct lwp *lp, cpumask_t *mask);
/*
 * Called from very low level boot code, sys/kern/init_main.c:mi_proc0init().
 * We cannot do anything fancy here: no malloc's, nothing other than
 * static initialization.
 */
struct usched *
usched_init(void)
{
	const char *defsched;

	defsched = kgetenv("kern.user_scheduler");

	/*
	 * Add various userland schedulers to the system.
	 */
	usched_ctl(&usched_bsd4, USCH_ADD);
	usched_ctl(&usched_dfly, USCH_ADD);
	usched_ctl(&usched_dummy, USCH_ADD);
	if (defsched == NULL)
		return(&usched_dfly);
	if (strcmp(defsched, "bsd4") == 0)
		return(&usched_bsd4);
	if (strcmp(defsched, "dfly") == 0)
		return(&usched_dfly);
	kprintf("WARNING: Running dummy userland scheduler\n");
	return(&usched_dummy);
}

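/*
 * Example (sketch): the selection above is driven by the kern.user_scheduler
 * kernel environment variable, which is normally set as a tunable from
 * /boot/loader.conf, e.g.:
 *
 *	kern.user_scheduler="bsd4"
 */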
/*
 * USCHED_CTL
 *
 * Add/remove a usched to/from the list of available userland schedulers.
 *
 * ARGUMENTS:
 *	usched - pointer to target scheduler
 *	action - addition (USCH_ADD) or removal (USCH_REM)
 *
 * RETURN VALUES:
 *	0      - success
 *	EINVAL - error
 */
int
usched_ctl(struct usched *usched, int action)
{
	struct usched *item;	/* temporary for TAILQ processing */
	int error = 0;

	switch (action) {
	case USCH_ADD:
		/*
		 * Make sure it isn't already on the list
		 */
		TAILQ_FOREACH(item, &usched_list, entry) {
			KKASSERT(item != usched);
		}
		/*
		 * Optional callback to the scheduler before we officially
		 * add it to the list.
		 */
		if (usched->usched_register)
			usched->usched_register();
		TAILQ_INSERT_TAIL(&usched_list, usched, entry);
		break;
	case USCH_REM:
		/*
		 * Do not allow the default scheduler to be removed
		 */
		if (strcmp(usched->name, "bsd4") == 0) {
			error = EINVAL;
			break;
		}
		TAILQ_FOREACH(item, &usched_list, entry) {
			if (item == usched)
				break;
		}
		if (item) {
			if (item->usched_unregister)
				item->usched_unregister();
			TAILQ_REMOVE(&usched_list, item, entry);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * Called from the scheduler clock on each cpu independently at the
 * common scheduling rate.  If the scheduler clock interrupted a running
 * lwp the lp will be non-NULL.
 */
void
usched_schedulerclock(struct lwp *lp, sysclock_t periodic, sysclock_t time)
{
	struct usched *item;

	TAILQ_FOREACH(item, &usched_list, entry) {
		if (lp && lp->lwp_proc->p_usched == item)
			item->schedulerclock(lp, periodic, time);
		else
			item->schedulerclock(NULL, periodic, time);
	}
}

/*
 * USCHED_SET(syscall)
 *
 * Set up a proc's usched.
 *
 * RETURN VALUES:
 *	0      - success
 *	EFBIG  - error (invalid cpu#)
 *	EPERM  - error (failed to delete cpu#)
 *	EINVAL - error (other reasons)
 */
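/*
 * Example (sketch, userland side): this syscall is normally reached through
 * the usched_set(2) stub; check the exact prototype against <sys/usched.h>
 * on the target system.  Pinning the calling process to cpu 1 would look
 * roughly like:
 *
 *	int cpuid = 1;
 *	usched_set(getpid(), USCHED_SET_CPU, &cpuid, sizeof(cpuid));
 */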
int
sys_usched_set(struct usched_set_args *uap)
{
	struct proc *p = curthread->td_proc;
	struct usched *item;	/* temporary for TAILQ processing */
	struct lwp *lp;
	int error;
	char buffer[NAME_LENGTH];
	cpumask_t mask;
	int cpuid;

	if (uap->pid != 0 && uap->pid != curthread->td_proc->p_pid)
		return (EINVAL);

	lp = curthread->td_lwp;
	lwkt_gettoken(&lp->lwp_token);

	switch (uap->cmd) {
	case USCHED_SET_SCHEDULER:
		if ((error = priv_check(curthread, PRIV_SCHED_SET)) != 0)
			break;
		error = copyinstr(uap->data, buffer, sizeof(buffer), NULL);
		if (error)
			break;
		TAILQ_FOREACH(item, &usched_list, entry) {
			if (strcmp(item->name, buffer) == 0)
				break;
		}

		/*
		 * If the scheduler for a process is being changed, disassociate
		 * the old scheduler before switching to the new one.
		 *
		 * XXX we might have to add an additional ABI call to do a 'full
		 * disassociation' and another ABI call to do a 'full
		 * reassociation'.
		 */
		/* XXX lwp have to deal with multiple lwps here */
		if (p->p_nthreads != 1) {
			error = EINVAL;
			break;
		}
		if (item && item != p->p_usched) {
			p->p_usched->release_curproc(ONLY_LWP_IN_PROC(p));
			p->p_usched->heuristic_exiting(ONLY_LWP_IN_PROC(p), p);
			p->p_usched = item;
		} else if (item == NULL) {
			error = EINVAL;
		}
		break;
	case USCHED_SET_CPU:
		if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
			break;
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if (cpuid < 0 || cpuid >= ncpus) {
			error = EFBIG;
			break;
		}
		if (CPUMASK_TESTBIT(smp_active_mask, cpuid) == 0) {
			error = EINVAL;
			break;
		}
		CPUMASK_ASSBIT(lp->lwp_cpumask, cpuid);
		if (cpuid != mycpu->gd_cpuid) {
			lwkt_migratecpu(cpuid);
			p->p_usched->changedcpu(lp);
		}
		break;
	case USCHED_GET_CPU:
		/* USCHED_GET_CPU doesn't require special privileges. */
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyout(&(mycpu->gd_cpuid), uap->data, sizeof(int));
		break;
	case USCHED_GET_CPUMASK:
		/* USCHED_GET_CPUMASK doesn't require special privileges. */
		if (uap->bytes != sizeof(cpumask_t)) {
			error = EINVAL;
			break;
		}
		mask = lp->lwp_cpumask;
		CPUMASK_ANDMASK(mask, smp_active_mask);
		error = copyout(&mask, uap->data, sizeof(cpumask_t));
		break;
	case USCHED_ADD_CPU:
		if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
			break;
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if (cpuid < 0 || cpuid >= ncpus) {
			error = EFBIG;
			break;
		}
		if (CPUMASK_TESTBIT(smp_active_mask, cpuid) == 0) {
			error = EINVAL;
			break;
		}
		CPUMASK_ORBIT(lp->lwp_cpumask, cpuid);
		break;
	case USCHED_DEL_CPU:
		/* USCHED_DEL_CPU doesn't require special privileges. */
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if (cpuid < 0 || cpuid >= ncpus) {
			error = EFBIG;
			break;
		}
		lp = curthread->td_lwp;
		mask = lp->lwp_cpumask;
		CPUMASK_ANDMASK(mask, smp_active_mask);
		CPUMASK_NANDBIT(mask, cpuid);
		if (CPUMASK_TESTZERO(mask)) {
			error = EPERM;
		} else {
			CPUMASK_NANDBIT(lp->lwp_cpumask, cpuid);
			if (CPUMASK_TESTMASK(lp->lwp_cpumask,
			    mycpu->gd_cpumask) == 0) {
				mask = lp->lwp_cpumask;
				CPUMASK_ANDMASK(mask, smp_active_mask);
				cpuid = BSFCPUMASK(mask);
				lwkt_migratecpu(cpuid);
				p->p_usched->changedcpu(lp);
			}
		}
		break;
	case USCHED_SET_CPUMASK:
		if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
			break;
		if (uap->bytes != sizeof(mask)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &mask, sizeof(mask));
		if (error)
			break;

		CPUMASK_ANDMASK(mask, smp_active_mask);
		if (CPUMASK_TESTZERO(mask)) {
			error = EPERM;
			break;
		}
		/* Commit the new cpumask. */
		lp->lwp_cpumask = mask;

		/* Migrate if necessary. */
		if (CPUMASK_TESTMASK(lp->lwp_cpumask, mycpu->gd_cpumask) == 0) {
			cpuid = BSFCPUMASK(lp->lwp_cpumask);
			lwkt_migratecpu(cpuid);
			p->p_usched->changedcpu(lp);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&lp->lwp_token);

	return (error);
}

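/*
 * LWP_GETAFFINITY(syscall)
 *
 * Copy out the effective cpu affinity mask (lwp_cpumask limited to
 * smp_active_mask) of the lwp selected by (pid, tid).  A pid of 0 means
 * the current process and a negative tid selects the first lwp in the
 * process.
 */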
int
sys_lwp_getaffinity(struct lwp_getaffinity_args *uap)
{
	struct proc *p;
	cpumask_t mask;
	struct lwp *lp;
	int error = 0;

	if (uap->pid < 0)
		return (EINVAL);

	if (uap->pid == 0)
		p = curproc;
	else
		p = pfind(uap->pid);	/* pfind() holds (p) */
	if (p == NULL)
		return (ESRCH);
	lwkt_gettoken(&p->p_token);

	if (uap->tid < 0) {
		lp = RB_FIRST(lwp_rb_tree, &p->p_lwp_tree);
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
	}
	if (lp == NULL) {
		error = ESRCH;
	} else {
		/* Take a snapshot for copyout, which may block. */
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);
		mask = lp->lwp_cpumask;
		CPUMASK_ANDMASK(mask, smp_active_mask);
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	lwkt_reltoken(&p->p_token);
	if (uap->pid != 0)
		PRELE(p);

	if (error == 0)
		error = copyout(&mask, uap->mask, sizeof(cpumask_t));

	return (error);
}

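/*
 * LWP_SETAFFINITY(syscall)
 *
 * Apply a new cpu affinity mask to one lwp (tid >= 0) or to every lwp of
 * the target process (tid < 0).  A pid of 0 targets the current process;
 * changing another process requires PRIV_SCHED_CPUSET privilege.
 *
 * Example (sketch, userland side): assuming the usual lwp_getaffinity(2)/
 * lwp_setaffinity(2) prototypes taking (pid, tid, mask pointer),
 * re-applying the current mask to all lwps of the calling process would
 * look roughly like:
 *
 *	cpumask_t mask;
 *
 *	if (lwp_getaffinity(0, -1, &mask) == 0)
 *		(void)lwp_setaffinity(0, -1, &mask);
 */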
int
sys_lwp_setaffinity(struct lwp_setaffinity_args *uap)
{
	struct proc *p;
	cpumask_t mask;
	struct lwp *lp;
	int error;

	/*
	 * NOTE:
	 * Always allow changing our own CPU affinity.
	 */
	if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0 &&
	    uap->pid != 0)
		return (error);

	error = copyin(uap->mask, &mask, sizeof(mask));
	if (error)
		return (error);

	CPUMASK_ANDMASK(mask, smp_active_mask);
	if (CPUMASK_TESTZERO(mask))
		return (EPERM);
	if (uap->pid < 0)
		return (EINVAL);

	if (uap->pid == 0)
		p = curproc;
	else
		p = pfind(uap->pid);	/* pfind() holds (p) */
	if (p == NULL)
		return (ESRCH);
	lwkt_gettoken(&p->p_token);

	if (uap->tid < 0) {
		FOREACH_LWP_IN_PROC(lp, p) {
			error = setaffinity_lp(lp, &mask);
		}
		/* not an error if no LPs left in process */
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		error = setaffinity_lp(lp, &mask);
	}
	lwkt_reltoken(&p->p_token);
	if (uap->pid != 0)
		PRELE(p);

	return (error);
}

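/*
 * Apply *mask to a single lwp.  If the lwp is the calling thread and its
 * current cpu is no longer permitted by the mask, migrate immediately;
 * otherwise the migration occurs at the next reschedule.
 */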
static int
setaffinity_lp(struct lwp *lp, cpumask_t *mask)
{
	if (lp == NULL)
		return (ESRCH);

	LWPHOLD(lp);
	lwkt_gettoken(&lp->lwp_token);
	lp->lwp_cpumask = *mask;

	/*
	 * NOTE: When adjusting a thread that is not our own the migration
	 *	 will occur at the next reschedule.
	 */
	if (lp == curthread->td_lwp) {
		/*
		 * Self migration can be done immediately,
		 * if necessary.
		 */
		if (CPUMASK_TESTBIT(lp->lwp_cpumask,
		    mycpu->gd_cpuid) == 0) {
			lwkt_migratecpu(BSFCPUMASK(lp->lwp_cpumask));
			lp->lwp_proc->p_usched->changedcpu(lp);
		}
	}
	lwkt_reltoken(&lp->lwp_token);
	LWPRELE(lp);

	return (0);
}