/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uicreate (uid_t uid);
static struct uidinfo *uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int low;
	int who;
};

static int getpriority_callback(struct proc *p, void *data);

int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == 0)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low))
				low = p->p_nice;
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		return (EINVAL);
	}
	if (low == PRIO_MAX + 1)
		return (ESRCH);
	uap->sysmsg_result = low;
	return (0);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return(0);
}
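
/*
 * Illustrative sketch (not part of the original file): the PRIO_USER
 * case above relies on allproc_scan() invoking getpriority_callback()
 * once per process.  A hypothetical in-kernel caller computing the
 * lowest nice value for uid 1001 would follow the same pattern:
 *
 *	struct getpriority_info info;
 *
 *	info.low = PRIO_MAX + 1;	// sentinel: nothing matched yet
 *	info.who = 1001;
 *	allproc_scan(getpriority_callback, &info);
 *	if (info.low == PRIO_MAX + 1)
 *		;			// uid owns no visible process (ESRCH)
 */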

struct setpriority_info {
	int prio;
	int who;
	int error;
	int found;
};

static int setpriority_callback(struct proc *p, void *data);

int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == 0)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		error = donice(p, uap->prio);
		found++;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}

static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
	return (0);
}
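
/*
 * Illustrative note (not part of the original file): donice() clamps
 * out-of-range requests instead of rejecting them, so an unprivileged
 * caller renicing its own process with n == 100 succeeds with p_nice
 * pinned at PRIO_MAX:
 *
 *	error = donice(curproc, 100);	// clamped to PRIO_MAX (20)
 *	// error == 0, curproc->p_nice == PRIO_MAX
 *
 * Only lowering nice below the current value requires
 * PRIV_SCHED_SETPRIORITY; raising it never does.
 */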

int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = p->p_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;

	if (uap->pid < 0) {
		return EINVAL;
	} else if (uap->pid == 0) {
		/* curproc already loaded on p */
	} else {
		p = pfind(uap->pid);
	}

	if (p == 0) {
		return ESRCH;
	}

	if (uap->tid < -1) {
		return EINVAL;
	} else if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 * like sys_rtprio()
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL)
			return ESRCH;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		return (copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio)));
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			return EPERM;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				return EPERM;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				return EPERM;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX)
				return EINVAL;
			lp->lwp_rtprio = rtp;
			return 0;
		default:
			return EINVAL;
		}
	default:
		return EINVAL;
	}
	panic("can't get here");
}
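
/*
 * Illustrative sketch (not part of the original file): the pid/tid
 * conventions above let a thread address its own lwp with pid == 0
 * and tid == -1.  Assuming the usual lwp_rtprio(2) userland wrapper
 * for this syscall, a self-lookup would read:
 *
 *	struct rtprio rtp;
 *
 *	if (lwp_rtprio(RTP_LOOKUP, 0, -1, &rtp) == 0)
 *		;	// rtp now holds the current lwp's scheduling class
 */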

/*
 * Set realtime priority
 */
/* ARGSUSED */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct proc *p;
	struct lwp *lp;
	struct ucred *cr = curp->p_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == 0)
		return (ESRCH);

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		return (copyout(&lp->lwp_rtprio, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid)
			return (EPERM);
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid)
				return (EPERM);
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
			if (RTP_PRIO_IS_REALTIME(rtp.type))
				return (EPERM);
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX)
				return (EINVAL);
			lp->lwp_rtprio = rtp;
			return (0);
		default:
			return (EINVAL);
		}
	default:
		return (EINVAL);
	}
}

int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}

int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return error;
}
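
/*
 * Illustrative note (not part of the original file): the sys_*() /
 * kern_*() split above keeps copyin/copyout at the syscall edge, so
 * in-kernel consumers (e.g. emulation layers) can call the kern_*
 * variants with kernel memory directly:
 *
 *	struct rlimit lim;
 *
 *	error = kern_getrlimit(RLIMIT_NOFILE, &lim);	// no copyout
 */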

/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
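
/*
 * Illustrative note (not part of the original file): given the
 * conversion above, td_uticks and td_sticks accumulate in microseconds,
 * so the split is plain usec -> timeval arithmetic.  For example,
 * td_uticks == 2500000 yields up->tv_sec == 2, up->tv_usec == 500000.
 */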

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}

/* ARGSUSED */
int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		break;

	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
	    sizeof (struct rusage)));
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
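
/*
 * Illustrative note (not part of the original file): the pointer walk
 * works because <sys/resource.h> defines ru_first/ru_last as aliases
 * for the first and last long-sized counter in struct rusage, so
 * &ru->ru_last - &ru->ru_first is the number of fields between them.
 * A typical caller simply folds one rusage into an accumulator:
 *
 *	struct rusage total;
 *
 *	bzero(&total, sizeof(total));
 *	ruadd(&total, &p->p_cru);	// total += child stats, field by field
 */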

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

static struct uidinfo *
uicreate(uid_t uid)
{
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 1;	/* we're returning a ref */
	uip->ui_posixlocks = 0;
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock_wr(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		varsymset_clean(&uip->ui_varsymset);
		spin_uninit(&uip->ui_lock);
		FREE(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	}
	spin_unlock_wr(&uihash_lock);

	return (uip);
}

struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *uip;

	spin_lock_rd(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_rd(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_rd(&uihash_lock);
	}
	return (uip);
}
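
/*
 * Illustrative sketch (not part of the original file): uifind() always
 * returns a held reference (uicreate() starts ui_ref at 1, the lookup
 * path takes uihold()), and it cannot fail since the allocation uses
 * M_WAITOK, so callers pair it with uidrop():
 *
 *	struct uidinfo *uip;
 *
 *	uip = uifind(uid);	// +1 ref
 *	// ... charge/uncharge resources on uip ...
 *	uidrop(uip);		// -1 ref; frees the struct on last drop
 */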

static __inline void
uifree(struct uidinfo *uip)
{
	spin_lock_wr(&uihash_lock);

	/*
	 * Note that we're taking a read lock even though we
	 * modify the structure because we know nobody can find
	 * it now that we've locked uihash_lock. If somebody
	 * can get to it through a stored pointer, the reference
	 * count will not be 0 and in that case we don't modify
	 * the struct.
	 */
	spin_lock_rd(&uip->ui_lock);
	if (uip->ui_ref != 0) {
		/*
		 * Someone found the uid and got a ref when we
		 * unlocked. No need to free any more.
		 */
		spin_unlock_rd(&uip->ui_lock);
		spin_unlock_wr(&uihash_lock);
		return;
	}
	if (uip->ui_sbsize != 0)
		/* XXX no %qd in kernel.  Truncate. */
		kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, (long)uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);

	LIST_REMOVE(uip, ui_hash);
	spin_unlock_wr(&uihash_lock);
	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_unlock_rd(&uip->ui_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
}

void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref > 0);
}

void
uidrop(struct uidinfo *uip)
{
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uip);
	} else {
		KKASSERT(uip->ui_ref > 0);
	}
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;
	spin_lock_wr(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock_wr(&uip->ui_lock);
	return ret;
}
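
/*
 * Illustrative sketch (not part of the original file): a fork-style
 * path charges and checks in one step, while teardown passes max == 0
 * so the subtraction is unconditional (the per-uid limit shown is a
 * hypothetical placeholder):
 *
 *	if (!chgproccnt(uip, 1, some_per_uid_limit))
 *		;			// over the limit -> fail with EAGAIN
 *	// ...
 *	chgproccnt(uip, -1, 0);		// always succeeds
 */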

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock_wr(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale
	 * down the hi water mark when we exceed the user's allowed
	 * socket buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock_wr(&uip->ui_lock);
	return (1);
}
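
/*
 * Illustrative arithmetic (not part of the original file): suppose the
 * uid already accounts 96KB, this socket's hiwat grows from 32KB to a
 * requested 64KB (new == 96 + 64 - 32 == 128KB), and max is 100KB.
 * The scale-down grants to == 64 * 100 / 128 == 50KB, i.e. the
 * watermark shrinks in proportion to the overshoot, but never below
 * MCLBYTES so atomic packet operations still fit.
 */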