/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sergey Glushchenko <deen@smz.com.ua>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_usched.c,v 1.8 2007/02/03 18:05:58 corecode Exp $
 */

#include <sys/errno.h>
#include <sys/globaldata.h>		/* curthread */
#include <sys/proc.h>
#include <sys/sysproto.h>		/* struct usched_set_args */
#include <sys/systm.h>			/* strcmp() */
#include <sys/usched.h>

#include <machine/smp.h>
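
/*
 * List of userland schedulers available on this system; entries are added
 * and removed via usched_ctl() below.
 */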
static TAILQ_HEAD(, usched) usched_list = TAILQ_HEAD_INITIALIZER(usched_list);

/*
 * Called from very low level boot code, i386/i386/machdep.c/init386().
 * We cannot do anything fancy here: no mallocs, nothing other than
 * static initialization.
 */
struct usched *
usched_init(void)
{
	const char *defsched;

	defsched = kgetenv("kern.user_scheduler");

	/*
	 * Add various userland schedulers to the system.
	 */
	usched_ctl(&usched_bsd4, USCH_ADD);
	usched_ctl(&usched_dummy, USCH_ADD);

	if (defsched == NULL)
		return(&usched_bsd4);
	if (strcmp(defsched, "bsd4") == 0)
		return(&usched_bsd4);
	kprintf("WARNING: Running dummy userland scheduler\n");
	return(&usched_dummy);
}
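
/*
 * Usage sketch: the kern.user_scheduler value consulted above is a kernel
 * environment variable, normally set as a loader tunable (assumption:
 * via /boot/loader.conf), e.g.:
 *
 *	kern.user_scheduler="bsd4"
 *
 * Any other value falls back to the dummy scheduler, with the warning
 * printed above.
 */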

/*
 * USCHED_CTL
 *
 * SYNOPSIS:
 *	Add or remove a usched from the list of available schedulers.
 *
 * ARGUMENTS:
 *	usched - pointer to the target scheduler
 *	action - addition or removal?
 *
 * RETURN VALUES:
 *	0 - success
 *	EINVAL - error
 */
int
usched_ctl(struct usched *usched, int action)
{
	struct usched *item;	/* temporary for TAILQ processing */
	int error = 0;

	switch(action) {
	case USCH_ADD:
		/*
		 * Make sure it isn't already on the list.
		 */
#ifdef INVARIANTS
		TAILQ_FOREACH(item, &usched_list, entry) {
			KKASSERT(item != usched);
		}
#endif
		/*
		 * Optional callback to the scheduler before we officially
		 * add it to the list.
		 */
		if (usched->usched_register)
			usched->usched_register();
		TAILQ_INSERT_TAIL(&usched_list, usched, entry);
		break;
	case USCH_REM:
		/*
		 * Do not allow the default scheduler to be removed.
		 */
		if (strcmp(usched->name, "bsd4") == 0) {
			error = EINVAL;
			break;
		}
		TAILQ_FOREACH(item, &usched_list, entry) {
			if (item == usched)
				break;
		}
		if (item) {
			if (item->usched_unregister)
				item->usched_unregister();
			TAILQ_REMOVE(&usched_list, item, entry);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
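
/*
 * Example (illustrative sketch, compiled out): how a hypothetical
 * third-party scheduler might hook itself into usched_list through
 * usched_ctl().  "usched_example" and its callbacks are made-up names;
 * only the struct usched members referenced above (name, usched_register,
 * usched_unregister) are relied upon, all other members are left zeroed.
 */
#if 0
static void
usched_example_register(void)
{
	/* one-time setup before the scheduler appears on usched_list */
}

static struct usched usched_example = {
	.name = "example",
	.usched_register = usched_example_register,
};

static void
usched_example_attach(void)
{
	/* usched_ctl() returns 0 on success, EINVAL otherwise */
	if (usched_ctl(&usched_example, USCH_ADD) != 0)
		kprintf("usched_example: registration failed\n");
}

static void
usched_example_detach(void)
{
	/* removal of anything but "bsd4" is allowed; unknown entries fail */
	usched_ctl(&usched_example, USCH_REM);
}
#endif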

/*
 * USCHED_SET(syscall)
 *
 * SYNOPSIS:
 *	Set a process's userland scheduler and/or cpu affinity.
 *
 * ARGUMENTS:
 *	pid	- target pid, which must be 0 or the caller's own pid
 *	cmd	- USCHED_SET_SCHEDULER, USCHED_SET_CPU, USCHED_ADD_CPU,
 *		  or USCHED_DEL_CPU
 *	data	- pointer to the command-specific argument
 *	bytes	- size of the data buffer
 *
 * RETURN VALUES:
 *	0 - success
 *	EINVAL - error
 */
int
sys_usched_set(struct usched_set_args *uap)
{
	struct proc *p = curthread->td_proc;
	struct usched *item;	/* temporary for TAILQ processing */
	int error;
	char buffer[NAME_LENGTH];
	cpumask_t mask;
	struct lwp *lp;
	int cpuid;

	if ((error = suser(curthread)) != 0)
		return (error);

	if (uap->pid != 0 && uap->pid != curthread->td_proc->p_pid)
		return (EINVAL);

	lp = curthread->td_lwp;
	switch (uap->cmd) {
	case USCHED_SET_SCHEDULER:
		if ((error = copyinstr(uap->data, buffer, sizeof(buffer),
		    NULL)) != 0)
			return (error);
		TAILQ_FOREACH(item, &usched_list, entry) {
			if (strcmp(item->name, buffer) == 0)
				break;
		}

		/*
		 * If the scheduler for a process is being changed, disassociate
		 * the old scheduler before switching to the new one.
		 *
		 * XXX we might have to add an additional ABI call to do a 'full
		 * disassociation' and another ABI call to do a 'full
		 * reassociation'
		 */
		/* XXX lwp have to deal with multiple lwps here */
		if (p->p_nthreads != 1)
			return (EINVAL);
		if (item && item != p->p_usched) {
			/* XXX lwp */
			p->p_usched->release_curproc(ONLY_LWP_IN_PROC(p));
			p->p_usched = item;
		} else if (item == NULL) {
			error = EINVAL;
		}
		break;
	case USCHED_SET_CPU:
		if (uap->bytes != sizeof(int))
			return (EINVAL);
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if ((smp_active_mask & (1 << cpuid)) == 0) {
			error = EINVAL;
			break;
		}
		lp->lwp_cpumask = 1 << cpuid;
		if (cpuid != mycpu->gd_cpuid)
			lwkt_migratecpu(cpuid);
		break;
	case USCHED_ADD_CPU:
		if (uap->bytes != sizeof(int))
			return (EINVAL);
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if ((smp_active_mask & (1 << cpuid)) == 0) {
			error = EINVAL;
			break;
		}
		lp->lwp_cpumask |= 1 << cpuid;
		break;
	case USCHED_DEL_CPU:
		if (uap->bytes != sizeof(int))
			return (EINVAL);
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		lp = curthread->td_lwp;
		mask = lp->lwp_cpumask & smp_active_mask & ~(1 << cpuid);
		if (mask == 0) {
			error = EPERM;
		} else {
			lp->lwp_cpumask &= ~(1 << cpuid);
			if ((lp->lwp_cpumask & mycpu->gd_cpumask) == 0) {
				cpuid = bsfl(lp->lwp_cpumask & smp_active_mask);
				lwkt_migratecpu(cpuid);
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
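
/*
 * Userland usage sketch, assuming the usched_set(2) syscall stub that
 * corresponds to struct usched_set_args (pid, cmd, data, bytes): pin the
 * calling process to CPU 1, then allow CPU 0 as well.  Per the checks
 * above, pid must be 0 or the caller's own pid and the caller must be
 * root.
 *
 *	#include <sys/usched.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int cpu = 1;
 *
 *	if (usched_set(getpid(), USCHED_SET_CPU, &cpu, sizeof(cpu)) < 0)
 *		err(1, "USCHED_SET_CPU");
 *	cpu = 0;
 *	if (usched_set(getpid(), USCHED_ADD_CPU, &cpu, sizeof(cpu)) < 0)
 *		err(1, "USCHED_ADD_CPU");
 */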