/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c  8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;           /* size of hash table - 1 */

static struct uidinfo   *uicreate (uid_t uid);
static struct uidinfo   *uilookup (uid_t uid);

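/*
 * Note on the hash above (illustrative, actual sizes depend on maxproc):
 * hashinit() returns a power-of-two number of LIST_HEAD buckets and
 * stores (nbuckets - 1) in uihash, so UIHASH() can mask instead of
 * taking a modulus.  With 128 buckets, uihash == 127 and uid 1000 maps
 * to bucket 1000 & 127 == 104.
 */
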
/*
 * Resource controls and accounting.
 */

struct getpriority_info {
        int     low;
        int     who;
};

static int getpriority_callback(struct proc *p, void *data);

int
sys_getpriority(struct getpriority_args *uap)
{
        struct getpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        struct pgrp *pg;
        int low = PRIO_MAX + 1;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0)
                        p = curp;
                else
                        p = pfind(uap->who);
                if (p == NULL)
                        break;
                if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
                        break;
                low = p->p_nice;
                break;

        case PRIO_PGRP:
                if (uap->who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pgfind(uap->who)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if ((PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low))
                                low = p->p_nice;
                }
                break;

        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.low = low;
                info.who = uap->who;
                allproc_scan(getpriority_callback, &info);
                low = info.low;
                break;

        default:
                return (EINVAL);
        }
        if (low == PRIO_MAX + 1) {
                return (ESRCH);
        }
        uap->sysmsg_result = low;
        return (0);
}

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static int
getpriority_callback(struct proc *p, void *data)
{
        struct getpriority_info *info = data;

        if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_nice < info->low) {
                info->low = p->p_nice;
        }
        return(0);
}

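/*
 * Illustrative userland sketch (not kernel code): querying the lowest
 * nice value across all processes owned by a uid lands in the PRIO_USER
 * case above, which drives getpriority_callback() via allproc_scan().
 *
 *      errno = 0;
 *      prio = getpriority(PRIO_USER, uid);
 *      if (prio == -1 && errno != 0)
 *              err(1, "getpriority");
 */
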
struct setpriority_info {
        int     prio;
        int     who;
        int     error;
        int     found;
};

static int setpriority_callback(struct proc *p, void *data);

int
sys_setpriority(struct setpriority_args *uap)
{
        struct setpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        struct pgrp *pg;
        int found = 0, error = 0;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0)
                        p = curp;
                else
                        p = pfind(uap->who);
                if (p == NULL)
                        break;
                if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
                        break;
                error = donice(p, uap->prio);
                found++;
                break;

        case PRIO_PGRP:
                if (uap->who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pgfind(uap->who)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = donice(p, uap->prio);
                                found++;
                        }
                }
                break;

        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(setpriority_callback, &info);
                found = info.found;
                error = info.error;
                break;

        default:
                return (EINVAL);
        }
        if (found == 0)
                return (ESRCH);
        return (error);
}

static int
setpriority_callback(struct proc *p, void *data)
{
        struct setpriority_info *info = data;
        int error;

        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
                error = donice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        return(0);
}

static int
donice(struct proc *chgp, int n)
{
        struct proc *curp = curproc;
        struct ucred *cr = curp->p_ucred;
        struct lwp *lp;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
                return (EACCES);
        chgp->p_nice = n;
        FOREACH_LWP_IN_PROC(lp, chgp)
                chgp->p_usched->resetpriority(lp);
        return (0);
}

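/*
 * Illustrative userland sketch: raising a nice value on your own process
 * is always allowed, but lowering it below the current value is what the
 * PRIV_SCHED_SETPRIORITY check above rejects for unprivileged callers
 * (EACCES).
 *
 *      setpriority(PRIO_PROCESS, getpid(), 10);        /- ok -/
 *      if (setpriority(PRIO_PROCESS, getpid(), -5) < 0)
 *              warn("renice down requires privilege");
 */
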
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
        struct proc *p = curproc;
        struct lwp *lp;
        struct rtprio rtp;
        struct ucred *cr = curthread->td_ucred;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return error;

        if (uap->pid == 0) {
                /* curproc already loaded on p */
        } else {
                p = pfind(uap->pid);
                if (p == NULL)
                        return ESRCH;
        }

        if (uap->tid == -1) {
                /*
                 * sadly, tid can be 0 so we can't use 0 here
                 */
                lp = curthread->td_lwp;
        } else {
                lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
                if (lp == NULL)
                        return ESRCH;
        }

        switch (uap->function) {
        case RTP_LOOKUP:
                error = copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio));
                break;
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        return EPERM;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) { /* XXX */
                                return EPERM;
                        }
                        /* can't set realtime priority */
                        /*
                         * Realtime priority has to be restricted for reasons which should be
                         * obvious. However, for idle priority, there is a potential for
                         * system deadlock if an idleprio process gains a lock on a resource
                         * that other processes need (and the idleprio process can't run
                         * due to a CPU-bound normal process). Fix me! XXX
                         */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                return EPERM;
                        }
                }
                switch (rtp.type) {
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                        if (rtp.prio > RTP_PRIO_MAX)
                                return EINVAL;
                        lp->lwp_rtprio = rtp;
                        error = 0;
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

/*
 * Set realtime priority
 */
int
sys_rtprio(struct rtprio_args *uap)
{
        struct proc *curp = curproc;
        struct proc *p;
        struct lwp *lp;
        struct rtprio rtp;
        struct ucred *cr = curthread->td_ucred;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return (error);

        if (uap->pid == 0)
                p = curp;
        else
                p = pfind(uap->pid);
        if (p == NULL)
                return (ESRCH);

        lp = FIRST_LWP_IN_PROC(p);
        switch (uap->function) {
        case RTP_LOOKUP:
                error = copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio));
                break;
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        error = EPERM;
                        break;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) {
                                error = EPERM;
                                break;
                        }
                        /* can't set realtime priority */
                        /*
                         * Realtime priority has to be restricted for reasons which should be
                         * obvious. However, for idle priority, there is a potential for
                         * system deadlock if an idleprio process gains a lock on a resource
                         * that other processes need (and the idleprio process can't run
                         * due to a CPU-bound normal process). Fix me! XXX
                         */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                error = EPERM;
                                break;
                        }
                }
                switch (rtp.type) {
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                        if (rtp.prio > RTP_PRIO_MAX) {
                                error = EINVAL;
                                break;
                        }
                        lp->lwp_rtprio = rtp;
                        error = 0;
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

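/*
 * Illustrative userland sketch of the rtprio(2) interface handled above
 * (RTP_LOOKUP/RTP_SET against the first lwp of the target process):
 *
 *      struct rtprio rtp;
 *
 *      rtp.type = RTP_PRIO_NORMAL;
 *      rtp.prio = 0;
 *      if (rtprio(RTP_SET, 0, &rtp) < 0)       /- pid 0 == calling process -/
 *              err(1, "rtprio");
 */
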
int
sys_setrlimit(struct __setrlimit_args *uap)
{
        struct rlimit alim;
        int error;

        error = copyin(uap->rlp, &alim, sizeof(alim));
        if (error)
                return (error);

        error = kern_setrlimit(uap->which, &alim);

        return (error);
}

int
sys_getrlimit(struct __getrlimit_args *uap)
{
        struct rlimit lim;
        int error;

        error = kern_getrlimit(uap->which, &lim);

        if (error == 0)
                error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
        return (error);
}

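/*
 * Illustrative userland sketch: both wrappers above just copy a struct
 * rlimit across the user/kernel boundary and defer the real work to
 * kern_setrlimit()/kern_getrlimit().
 *
 *      struct rlimit rl;
 *
 *      if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *              rl.rlim_cur = rl.rlim_max;
 *              (void)setrlimit(RLIMIT_NOFILE, &rl);
 *      }
 */
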
/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch an lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
        struct thread *td;

        /*
         * Calculate at the statclock level.  YYY if the thread is owned by
         * another cpu we need to forward the request to the other cpu, or
         * have a token to interlock the information in order to avoid racing
         * thread destruction.
         */
        if ((td = lp->lwp_thread) != NULL) {
                crit_enter();
                up->tv_sec = td->td_uticks / 1000000;
                up->tv_usec = td->td_uticks % 1000000;
                sp->tv_sec = td->td_sticks / 1000000;
                sp->tv_usec = td->td_sticks % 1000000;
                crit_exit();
        }
}

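/*
 * Worked example of the conversion above: td_uticks/td_sticks are treated
 * as microsecond counts here, so an lwp that has accumulated 2500000 user
 * ticks reports up->tv_sec = 2 and up->tv_usec = 500000.
 */
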
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
        struct timeval upt, spt;
        long *rip1, *rip2;
        struct lwp *lp;

        *ru = p->p_ru;

        FOREACH_LWP_IN_PROC(lp, p) {
                calcru(lp, &upt, &spt);
                timevaladd(&ru->ru_utime, &upt);
                timevaladd(&ru->ru_stime, &spt);
                for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
                     rip1 <= &ru->ru_last;
                     rip1++, rip2++)
                        *rip1 += *rip2;
        }
}

int
sys_getrusage(struct getrusage_args *uap)
{
        struct rusage ru;
        struct rusage *rup;
        int error;

        switch (uap->who) {
        case RUSAGE_SELF:
                rup = &ru;
                calcru_proc(curproc, rup);
                error = 0;
                break;
        case RUSAGE_CHILDREN:
                rup = &curproc->p_cru;
                error = 0;
                break;
        default:
                error = EINVAL;
                break;
        }
        if (error == 0)
                error = copyout(rup, uap->rusage, sizeof(struct rusage));
        return (error);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timevaladd(&ru->ru_utime, &ru2->ru_utime);
        timevaladd(&ru->ru_stime, &ru2->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

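/*
 * Typical in-kernel usage sketch (illustrative names): when a child is
 * reaped its final rusage is folded into the parent's p_cru with ruadd(),
 * which is what RUSAGE_CHILDREN reports above.
 *
 *      ruadd(&parent->p_cru, &child_rusage);
 */
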
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
        spin_init(&uihash_lock);
        uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

/*
 * NOTE: Must be called with uihash_lock held
 */
static struct uidinfo *
uilookup(uid_t uid)
{
        struct uihashhead *uipp;
        struct uidinfo *uip;

        uipp = UIHASH(uid);
        LIST_FOREACH(uip, uipp, ui_hash) {
                if (uip->ui_uid == uid)
                        break;
        }
        return (uip);
}

static struct uidinfo *
uicreate(uid_t uid)
{
        struct uidinfo *uip, *tmp;

        /*
         * Allocate space and check for a race
         */
        MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);

        /*
         * Initialize structure and enter it into the hash table
         */
        spin_init(&uip->ui_lock);
        uip->ui_uid = uid;
        uip->ui_proccnt = 0;
        uip->ui_sbsize = 0;
        uip->ui_ref = 1;        /* we're returning a ref */
        uip->ui_posixlocks = 0;
        varsymset_init(&uip->ui_varsymset, NULL);

        /*
         * Somebody may have already created the uidinfo for this
         * uid. If so, return that instead.
         */
        spin_lock_wr(&uihash_lock);
        tmp = uilookup(uid);
        if (tmp != NULL) {
                varsymset_clean(&uip->ui_varsymset);
                spin_uninit(&uip->ui_lock);
                FREE(uip, M_UIDINFO);
                uip = tmp;
        } else {
                LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
        }
        spin_unlock_wr(&uihash_lock);

        return (uip);
}

struct uidinfo *
uifind(uid_t uid)
{
        struct uidinfo *uip;

        spin_lock_rd(&uihash_lock);
        uip = uilookup(uid);
        if (uip == NULL) {
                spin_unlock_rd(&uihash_lock);
                uip = uicreate(uid);
        } else {
                uihold(uip);
                spin_unlock_rd(&uihash_lock);
        }
        return (uip);
}

static void
uifree(struct uidinfo *uip)
{
        spin_lock_wr(&uihash_lock);

        /*
         * Note that we're taking a read lock even though we
         * modify the structure because we know nobody can find
         * it now that we've locked uihash_lock. If somebody
         * can get to it through a stored pointer, the reference
         * count will not be 0 and in that case we don't modify
         * it.
         */
        spin_lock_rd(&uip->ui_lock);
        if (uip->ui_ref != 0) {
                /*
                 * Someone found the uid and got a ref when we
                 * unlocked. No need to free any more.
                 */
                spin_unlock_rd(&uip->ui_lock);
                spin_unlock_wr(&uihash_lock);
                return;
        }
        if (uip->ui_sbsize != 0)
                /* XXX no %qd in kernel.  Truncate. */
                kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
                        uip->ui_uid, (long)uip->ui_sbsize);
        if (uip->ui_proccnt != 0)
                kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
                        uip->ui_uid, uip->ui_proccnt);

        LIST_REMOVE(uip, ui_hash);
        spin_unlock_wr(&uihash_lock);
        varsymset_clean(&uip->ui_varsymset);
        lockuninit(&uip->ui_varsymset.vx_lock);
        spin_unlock_rd(&uip->ui_lock);
        spin_uninit(&uip->ui_lock);
        FREE(uip, M_UIDINFO);
}

void
uihold(struct uidinfo *uip)
{
        atomic_add_int(&uip->ui_ref, 1);
        KKASSERT(uip->ui_ref > 0);
}

void
uidrop(struct uidinfo *uip)
{
        if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
                uifree(uip);
        } else {
                KKASSERT(uip->ui_ref > 0);
        }
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
        uidrop(*puip);
        *puip = nuip;
}

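/*
 * uidinfo reference lifecycle, as a sketch (illustrative): consumers look
 * up (or create) the structure with uifind(), which returns it referenced,
 * and release it with uidrop() when the charge has been backed out.
 *
 *      struct uidinfo *uip;
 *
 *      uip = uifind(uid);
 *      ...                             /- charge/uncharge resources -/
 *      uidrop(uip);
 */
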
/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
        int ret;

        spin_lock_wr(&uip->ui_lock);
        /* don't allow them to exceed max, but allow subtraction */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
                ret = 0;
        } else {
                uip->ui_proccnt += diff;
                if (uip->ui_proccnt < 0)
                        kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
                ret = 1;
        }
        spin_unlock_wr(&uip->ui_lock);
        return (ret);
}

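/*
 * Illustrative caller sketch: the fork path charges the new process
 * against the parent's uid and fails the fork when the per-uid limit
 * (e.g. maxprocperuid) is hit; exit uncharges with a negative diff and
 * max == 0 so the subtraction is never refused.
 *
 *      if (!chgproccnt(uip, 1, maxprocperuid))
 *              return (EAGAIN);        /- too many processes for this uid -/
 *      ...
 *      chgproccnt(uip, -1, 0);         /- on teardown -/
 */
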
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
        rlim_t new;

        spin_lock_wr(&uip->ui_lock);
        new = uip->ui_sbsize + to - *hiwat;

        /*
         * If we are trying to increase the socket buffer size
         * Scale down the hi water mark when we exceed the user's
         * allowed socket buffer space.
         *
         * We can't scale down too much or we will blow up atomic packet
         * operations.
         */
        if (to > *hiwat && to > MCLBYTES && new > max) {
                to = to * max / new;
                if (to < MCLBYTES)
                        to = MCLBYTES;
        }
        uip->ui_sbsize = new;
        *hiwat = to;
        spin_unlock_wr(&uip->ui_lock);
        return (1);
}
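
/*
 * Illustrative caller sketch: socket buffer reservation charges the
 * owner's uid and treats a zero return as "over the sbsize limit",
 * along the lines of sbreserve():
 *
 *      if (!chgsbsize(uip, &sb->sb_hiwat, cc, rl))
 *              return (0);             /- reservation refused -/
 */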