/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */
43 #include "opt_compat.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/kernel.h>
51 #include <sys/resourcevar.h>
52 #include <sys/malloc.h>
56 #include <sys/lockf.h>
59 #include <vm/vm_param.h>
62 #include <vm/vm_map.h>
64 #include <sys/thread2.h>
65 #include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);
static int doionice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");

#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */
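
/*
 * Example (illustrative): hashinit() below sizes the table to a power
 * of 2 and stores the bucket count minus 1 in uihash, so with 64
 * buckets (uihash == 63) UIHASH(1001) selects bucket 1001 & 63 == 41.
 * The actual table size depends on maxproc at boot.
 */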

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int	low;
	int	who;
};

static int getpriority_callback(struct proc *p, void *data);

int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct pgrp *pg;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				low = p->p_nice;
			}
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
		}
		pgrel(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	lwkt_reltoken(&proc_token);
	return (error);
}
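
/*
 * Note that "low" uses PRIO_MAX + 1 as a sentinel: nice values range
 * from PRIO_MIN to PRIO_MAX, so a result that never dropped below the
 * sentinel means no matching process was visible and ESRCH is returned.
 */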

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return(0);
}

struct setpriority_info {
	int	prio;
	int	who;
	int	error;
	int	found;
};

static int setpriority_callback(struct proc *p, void *data);

int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct pgrp *pg;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		pgrel(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	lwkt_reltoken(&proc_token);

	if (found == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set the nice value for processes owned by the specified user.
 */
static int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}

static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
	return (0);
}
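
/*
 * Summary of the checks above: a non-root caller may only renice
 * processes sharing its effective or real uid, and lowering the nice
 * value (raising priority) additionally requires
 * PRIV_SCHED_SETPRIORITY.  For example, moving a process from nice 0
 * to nice 10 needs no privilege, but moving it back to 0 does.
 */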

struct ioprio_get_info {
	int	high;
	int	who;
};

static int ioprio_get_callback(struct proc *p, void *data);

int
sys_ioprio_get(struct ioprio_get_args *uap)
{
	struct ioprio_get_info info;
	struct pgrp *pg;
	struct proc *curp = curproc;
	struct proc *p;
	int high = IOPRIO_MIN - 2;
	int error;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred))
				high = p->p_ionice;
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_ionice > high) {
				high = p->p_ionice;
			}
		}
		pgrel(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.high = high;
		info.who = uap->who;
		allproc_scan(ioprio_get_callback, &info);
		high = info.high;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (high == IOPRIO_MIN - 2) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = high;
	error = 0;
done:
	lwkt_reltoken(&proc_token);
	return (error);
}

/*
 * Figure out the current highest i/o priority for processes owned
 * by the specified user.
 */
static int
ioprio_get_callback(struct proc *p, void *data)
{
	struct ioprio_get_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_ionice > info->high) {
		info->high = p->p_ionice;
	}
	return(0);
}

struct ioprio_set_info {
	int	prio;
	int	who;
	int	error;
	int	found;
};

static int ioprio_set_callback(struct proc *p, void *data);

int
sys_ioprio_set(struct ioprio_set_args *uap)
{
	struct ioprio_set_info info;
	struct pgrp *pg;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
				found++;
			}
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
				found++;
			}
		}
		pgrel(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(ioprio_set_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	lwkt_reltoken(&proc_token);

	if (found == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set the i/o priority for processes owned by the specified user.
 */
static int
ioprio_set_callback(struct proc *p, void *data)
{
	struct ioprio_set_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = doionice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}

static int
doionice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > IOPRIO_MAX)
		n = IOPRIO_MAX;
	if (n < IOPRIO_MIN)
		n = IOPRIO_MIN;
	if (n < chgp->p_ionice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_ionice = n;
	return (0);
}

int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	lwkt_gettoken(&proc_token);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 * instead we have to use -1
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	lwkt_reltoken(&proc_token);
	return (error);
}
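
/*
 * Note on the tid convention above: lwp ids start at 0, so 0 cannot
 * mean "the calling thread" the way pid 0 means "the calling process";
 * -1 serves as that wildcard instead.
 */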

/*
 * Set realtime priority
 */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	lwkt_gettoken(&proc_token);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	lwkt_reltoken(&proc_token);
	return (error);
}

int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}
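
/*
 * The syscall wrapper only copies the new limits in from userland;
 * validation against the hard limit and the actual installation happen
 * in kern_setrlimit() so in-kernel callers can share that path.
 */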

int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return error;
}

/*
 * Transform the running time and tick information in lwp lp's thread
 * into user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
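
/*
 * Example (illustrative): td_uticks and td_sticks accumulate estimated
 * run time in microseconds, so a thread with td_uticks == 2500000
 * reports up->tv_sec == 2 and up->tv_usec == 500000.
 */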

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}

int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;
	int error;

	lwkt_gettoken(&proc_token);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	lwkt_reltoken(&proc_token);
	return (error);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
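
/*
 * The pointer walk above depends on ru_first and ru_last (defined in
 * <sys/resource.h>) aliasing the first and last of the contiguous
 * long-typed counters in struct rusage, so every field from ru_ixrss
 * through ru_nivcsw is summed pairwise.  ru_maxrss is outside that
 * range, which is why it is special-cased with a max above.
 */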

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

/*
 * NOTE: Must be called with uihash_lock held
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

/*
 * Helper function to create a uid that could not be found.
 * This function will properly deal with races.
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK|M_ZERO);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_ref = 1;	/* we're returning a ref */
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		uihold(tmp);
		spin_unlock(&uihash_lock);

		spin_uninit(&uip->ui_lock);
		varsymset_clean(&uip->ui_varsymset);
		FREE(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		spin_unlock(&uihash_lock);
	}
	return (uip);
}
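
/*
 * Design note: uicreate() allocates and initializes optimistically
 * before taking uihash_lock.  If another thread won the race and
 * inserted the uid first, the freshly built structure is torn down
 * again and the winner's (with an extra ref) is returned instead, so
 * callers always receive exactly one reference either way.
 */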

/*
 * Find the uidinfo for a uid, creating one if necessary.  Returns a
 * held (referenced) uidinfo.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *uip;

	spin_lock(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock(&uihash_lock);
	}
	return (uip);
}

/*
 * Helper function to remove a uidinfo whose reference count is
 * transitioning from 1->0.  The reference count is 1 on call.
 *
 * Zero is returned on success, otherwise non-zero and the
 * uip has not been removed.
 */
static int
uifree(struct uidinfo *uip)
{
	/*
	 * If we are still the only holder after acquiring the uihash_lock
	 * we can safely unlink the uip and destroy it.  Otherwise we lost
	 * a race and must fail.
	 */
	spin_lock(&uihash_lock);
	if (uip->ui_ref != 1) {
		spin_unlock(&uihash_lock);
		return(-1);
	}
	LIST_REMOVE(uip, ui_hash);
	spin_unlock(&uihash_lock);

	/*
	 * The uip is now orphaned and we can destroy it at our
	 * leisure.
	 */
	if (uip->ui_sbsize != 0)
		kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
		    uip->ui_uid, (intmax_t)uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);

	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
	return(0);
}

void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref >= 0);
}

/*
 * NOTE: It is important for us to not drop the ref count to 0
 *	 because this can cause a 2->0/2->0 race with another
 *	 concurrent dropper.  Losing the race in that situation
 *	 can cause uip to become stale for one of the other
 *	 consumers.
 */
void
uidrop(struct uidinfo *uip)
{
	int ref;

	KKASSERT(uip->ui_ref > 0);

	for (;;) {
		ref = uip->ui_ref;
		if (ref == 1) {
			if (uifree(uip) == 0)
				break;
		} else if (atomic_cmpset_int(&uip->ui_ref, ref, ref - 1)) {
			break;
		}
		/* else retry */
	}
}
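
/*
 * Illustration of the race the loop avoids: with ui_ref == 2, two
 * droppers blindly decrementing could each believe they took the count
 * to zero and both attempt destruction.  Instead, a dropper that
 * observes ref == 1 calls uifree(), which revalidates the count under
 * uihash_lock and fails (forcing a retry here) if a concurrent
 * uihold() appeared in the meantime.
 */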

/*
 * Replace the referenced uidinfo pointed to by *puip with nuip,
 * dropping the replaced reference.
 */
void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	spin_lock(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock(&uip->ui_lock);
	return ret;
}
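
/*
 * Example (illustrative): a caller such as fork() might use
 * chgproccnt(uip, 1, limit) and treat a 0 return as "process limit
 * reached", while chgproccnt(uip, -1, 0) at exit always succeeds
 * because max == 0 disables the check.
 */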

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;

	/*
	 * If we are trying to increase the socket buffer size, scale down
	 * the hi water mark when we exceed the user's allowed socket buffer
	 * space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock(&uip->ui_lock);
	return (1);
}
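
/*
 * Worked example (illustrative): with max == 40960 and an increase
 * that would make new == 81920, the hi water mark is scaled to
 * to * 40960 / 81920, i.e. halved, but never below MCLBYTES so that
 * atomic packet operations still fit.
 */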