/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sergey Glushchenko <deen@smz.com.ua>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_usched.c,v 1.9 2007/07/02 17:06:55 dillon Exp $
 */
#include <sys/errno.h>
#include <sys/globaldata.h>		/* curthread */
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/sysproto.h>		/* struct usched_set_args */
#include <sys/systm.h>			/* strcmp() */
#include <sys/usched.h>

#include <sys/mplock2.h>

#include <machine/smp.h>
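
/*
 * usched_list links together every userland scheduler registered via
 * usched_ctl() below.  usched_mastermask is initialized with all bits
 * set, i.e. every cpu.
 */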
static TAILQ_HEAD(, usched) usched_list = TAILQ_HEAD_INITIALIZER(usched_list);

cpumask_t usched_mastermask = -1;
/*
 * Called from very low level boot code, i386/i386/machdep.c/init386().
 * We cannot do anything fancy here.  No mallocs, nothing other than
 * static initialization.
 */
struct usched *
usched_init(void)
{
	const char *defsched;

	defsched = kgetenv("kern.user_scheduler");

	/*
	 * Add various userland schedulers to the system.
	 */
	usched_ctl(&usched_bsd4, USCH_ADD);
	usched_ctl(&usched_dummy, USCH_ADD);
	if (defsched == NULL)
		return(&usched_bsd4);
	if (strcmp(defsched, "bsd4") == 0)
		return(&usched_bsd4);
	kprintf("WARNING: Running dummy userland scheduler\n");
	return(&usched_dummy);
}
/*
 * USCHED_CTL
 *
 * SYNOPSIS:
 *	Add a usched to, or remove one from, the scheduler list.
 *
 * ARGUMENTS:
 *	usched - pointer to target scheduler
 *	action - addition or removal?
 *
 * RETURN VALUES:
 *	0      - success
 *	EINVAL - error
 */
int
usched_ctl(struct usched *usched, int action)
{
	struct usched *item;	/* temporary iterator for TAILQ processing */
	int error = 0;

	switch(action) {
	case USCH_ADD:
		/*
		 * Make sure it isn't already on the list
		 */
#ifdef INVARIANTS
		TAILQ_FOREACH(item, &usched_list, entry) {
			KKASSERT(item != usched);
		}
#endif
		/*
		 * Optional callback to the scheduler before we officially
		 * add it to the list.
		 */
		if (usched->usched_register)
			usched->usched_register();
		TAILQ_INSERT_TAIL(&usched_list, usched, entry);
		break;
	case USCH_REM:
		/*
		 * Do not allow the default scheduler to be removed
		 */
		if (strcmp(usched->name, "bsd4") == 0) {
			error = EINVAL;
			break;
		}
		TAILQ_FOREACH(item, &usched_list, entry) {
			if (item == usched)
				break;
		}
		if (item) {
			if (item->usched_unregister)
				item->usched_unregister();
			TAILQ_REMOVE(&usched_list, item, entry);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
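
/*
 * Example (a sketch only, not part of the original file): a hypothetical
 * third-party scheduler would be registered and later unregistered
 * through usched_ctl() roughly as follows.  "usched_example" is an
 * assumed name, not an existing scheduler:
 *
 *	extern struct usched usched_example;
 *
 *	error = usched_ctl(&usched_example, USCH_ADD);
 *	...
 *	error = usched_ctl(&usched_example, USCH_REM);
 */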
/*
 * USCHED_SET(syscall)
 *
 * SYNOPSIS:
 *	Setting up a proc's usched.
 *
 * ARGUMENTS:
 *	pid	-
 *	cmd	-
 *	data	-
 *	bytes	-
 *
 * RETURN VALUES:
 *	0      - success
 *	EINVAL - error
 *
 * MPALMOSTSAFE
 */
int
sys_usched_set(struct usched_set_args *uap)
{
	struct proc *p = curthread->td_proc;
	struct usched *item;	/* temporary iterator for TAILQ processing */
	int error;
	char buffer[NAME_LENGTH];
	cpumask_t mask;
	struct lwp *lp;
	int cpuid;

	if (uap->pid != 0 && uap->pid != curthread->td_proc->p_pid)
		return (EINVAL);

	lp = curthread->td_lwp;
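	/*
	 * MPALMOSTSAFE: the syscall is entered without the MP lock; we
	 * take it here and hold it until just before returning.
	 */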
	get_mplock();

	switch (uap->cmd) {
	case USCHED_SET_SCHEDULER:
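		/*
		 * Look the requested scheduler up by name and, if found,
		 * switch the (currently single-threaded) process over to it.
		 */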
		if ((error = priv_check(curthread, PRIV_SCHED_SET)) != 0)
			break;
		error = copyinstr(uap->data, buffer, sizeof(buffer), NULL);
		if (error)
			break;
		TAILQ_FOREACH(item, &usched_list, entry) {
			if ((strcmp(item->name, buffer) == 0))
				break;
		}

		/*
		 * If the scheduler for a process is being changed, disassociate
		 * the old scheduler before switching to the new one.
		 *
		 * XXX we might have to add an additional ABI call to do a 'full
		 * disassociation' and another ABI call to do a 'full
		 * reassociation'
		 */
		/* XXX lwp: have to deal with multiple lwps here */
		if (p->p_nthreads != 1) {
			error = EINVAL;
			break;
		}
		if (item && item != p->p_usched) {
			/* XXX lwp */
			p->p_usched->release_curproc(ONLY_LWP_IN_PROC(p));
			p->p_usched = item;
		} else if (item == NULL) {
			error = EINVAL;
		}
		break;
	case USCHED_SET_CPU:
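		/*
		 * Pin the calling lwp to a single cpu, migrating immediately
		 * if we are not already running on that cpu.
		 */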
		if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
			break;
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if (cpuid < 0 || cpuid >= ncpus) {
			error = EFBIG;
			break;
		}
		if ((smp_active_mask & (1 << cpuid)) == 0) {
			error = EINVAL;
			break;
		}
		lp->lwp_cpumask = 1 << cpuid;
		if (cpuid != mycpu->gd_cpuid)
			lwkt_migratecpu(cpuid);
		break;
	case USCHED_GET_CPU:
		/* USCHED_GET_CPU doesn't require special privileges. */
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyout(&(mycpu->gd_cpuid), uap->data, sizeof(int));
		break;
	case USCHED_ADD_CPU:
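		/*
		 * Add one cpu to the set of cpus the calling lwp is allowed
		 * to run on.
		 */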
		if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
			break;
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if (cpuid < 0 || cpuid >= ncpus) {
			error = EFBIG;
			break;
		}
		if (!(smp_active_mask & (1 << cpuid))) {
			error = EINVAL;
			break;
		}
		lp->lwp_cpumask |= 1 << cpuid;
		break;
	case USCHED_DEL_CPU:
		/* USCHED_DEL_CPU doesn't require special privileges. */
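		/*
		 * Remove one cpu from the allowed set.  Removing the last
		 * runnable cpu fails with EPERM; if the current cpu is
		 * removed, migrate to the lowest-numbered cpu left in the
		 * mask.
		 */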
		if (uap->bytes != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = copyin(uap->data, &cpuid, sizeof(int));
		if (error)
			break;
		if (cpuid < 0 || cpuid >= ncpus) {
			error = EFBIG;
			break;
		}
		lp = curthread->td_lwp;
		mask = lp->lwp_cpumask & smp_active_mask & ~(1 << cpuid);
		if (mask == 0)
			error = EPERM;
		else {
			lp->lwp_cpumask &= ~(1 << cpuid);
			if ((lp->lwp_cpumask & mycpu->gd_cpumask) == 0) {
				cpuid = bsfl(lp->lwp_cpumask & smp_active_mask);
				lwkt_migratecpu(cpuid);
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	rel_mplock();
	return (error);
}
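
/*
 * Userland usage sketch (illustrative only, not part of the original
 * file; assumes the usched_set(2) libc wrapper and the USCHED_*
 * constants from <sys/usched.h>):
 *
 *	#include <sys/usched.h>
 *	#include <unistd.h>
 *
 *	int cpu = 0;
 *
 *	usched_set(getpid(), USCHED_SET_CPU, &cpu, sizeof(cpu));
 *	usched_set(getpid(), USCHED_GET_CPU, &cpu, sizeof(cpu));
 *
 * The first call pins the process to cpu 0; the second writes the id
 * of the cpu the process is currently running on back into 'cpu'.
 */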