/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
static int donice (struct proc *chgp, int n);
static int doionice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uilookup (uid_t uid);
/*
 * Resource controls and accounting.
 */
struct getpriority_info {
        int     low;
        int     who;
};

static int getpriority_callback(struct proc *p, void *data);
/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct getpriority_args *uap)
{
        struct getpriority_info info;
        thread_t curtd = curthread;
        struct proc *curp = curproc;
        struct proc *p;
        struct pgrp *pg;
        int low = PRIO_MAX + 1;
        int error;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        low = curp->p_nice;
                } else {
                        p = pfind(uap->who);
                        if (p) {
                                lwkt_gettoken_shared(&p->p_token);
                                if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
                                        low = p->p_nice;
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                        }
                }
                break;
        case PRIO_PGRP:
                if (uap->who == 0) {
                        lwkt_gettoken_shared(&curp->p_token);
                        pg = curp->p_pgrp;
                        pgref(pg);
                        lwkt_reltoken(&curp->p_token);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                lwkt_gettoken_shared(&pg->pg_token);
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
                            p->p_nice < low) {
                                low = p->p_nice;
                        }
                }
                lwkt_reltoken(&pg->pg_token);
                pgrel(pg);
                break;
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curtd->td_ucred->cr_uid;
                info.low = low;
                info.who = uap->who;
                allproc_scan(getpriority_callback, &info, 0);
                low = info.low;
                break;
        default:
                error = EINVAL;
                goto done;
        }
        if (low == PRIO_MAX + 1) {
                error = ESRCH;
                goto done;
        }
        uap->sysmsg_result = low;
        error = 0;
done:
        return (error);
}
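/*
 * Illustrative userland use of getpriority(2) (a sketch, not code from
 * this file): since a nice value may legitimately be -1, a caller must
 * clear errno to distinguish an error return from a valid result.
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, pid);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 */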
/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
        struct getpriority_info *info = data;

        lwkt_gettoken_shared(&p->p_token);
        if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_nice < info->low) {
                info->low = p->p_nice;
        }
        lwkt_reltoken(&p->p_token);
        return(0);
}
struct setpriority_info {
        int     prio;
        int     who;
        int     error;
        int     found;
};

static int setpriority_callback(struct proc *p, void *data);
/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct setpriority_args *uap)
{
        struct setpriority_info info;
        thread_t curtd = curthread;
        struct proc *curp = curproc;
        struct proc *p;
        struct pgrp *pg;
        int found = 0, error = 0;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        lwkt_gettoken(&curp->p_token);
                        error = donice(curp, uap->prio);
                        found++;
                        lwkt_reltoken(&curp->p_token);
                } else {
                        p = pfind(uap->who);
                        if (p) {
                                lwkt_gettoken(&p->p_token);
                                if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
                                        error = donice(p, uap->prio);
                                        found++;
                                }
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                        }
                }
                break;
        case PRIO_PGRP:
                if (uap->who == 0) {
                        lwkt_gettoken_shared(&curp->p_token);
                        pg = curp->p_pgrp;
                        pgref(pg);
                        lwkt_reltoken(&curp->p_token);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                lwkt_gettoken(&pg->pg_token);
restart:
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pgrp == pg &&
                            PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
                                error = donice(p, uap->prio);
                                found++;
                        }
                        lwkt_reltoken(&p->p_token);
                        if (p->p_pgrp != pg) {
                                PRELE(p);
                                goto restart;
                        }
                        PRELE(p);
                }
                lwkt_reltoken(&pg->pg_token);
                pgrel(pg);
                break;
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curtd->td_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(setpriority_callback, &info, 0);
                error = info.error;
                found = info.found;
                break;
        default:
                error = EINVAL;
                found = 1;
                break;
        }

        if (found == 0)
                error = ESRCH;
        return (error);
}
static
int
setpriority_callback(struct proc *p, void *data)
{
        struct setpriority_info *info = data;
        int error;

        lwkt_gettoken(&p->p_token);
        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                error = donice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        lwkt_reltoken(&p->p_token);
        return(0);
}
/*
 * Caller must hold chgp->p_token
 */
static int
donice(struct proc *chgp, int n)
{
        struct ucred *cr = curthread->td_ucred;
        struct lwp *lp;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
                return (EACCES);
        chgp->p_nice = n;
        FOREACH_LWP_IN_PROC(lp, chgp) {
                LWPHOLD(lp);
                chgp->p_usched->resetpriority(lp);
                LWPRELE(lp);
        }
        return (0);
}
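/*
 * Example of the rules above: an unprivileged request for nice 25 is
 * silently clamped to PRIO_MAX (20), while lowering a process from
 * nice 0 to -5 fails with EACCES unless the caller holds
 * PRIV_SCHED_SETPRIORITY.
 */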
struct ioprio_get_info {
        int     high;
        int     who;
};

static int ioprio_get_callback(struct proc *p, void *data);
/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_get(struct ioprio_get_args *uap)
{
        struct ioprio_get_info info;
        thread_t curtd = curthread;
        struct proc *curp = curproc;
        struct proc *p;
        struct pgrp *pg;
        int high = IOPRIO_MIN-2;
        int error;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        high = curp->p_ionice;
                } else {
                        p = pfind(uap->who);
                        if (p) {
                                lwkt_gettoken_shared(&p->p_token);
                                if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
                                        high = p->p_ionice;
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                        }
                }
                break;
        case PRIO_PGRP:
                if (uap->who == 0) {
                        lwkt_gettoken_shared(&curp->p_token);
                        pg = curp->p_pgrp;
                        pgref(pg);
                        lwkt_reltoken(&curp->p_token);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                lwkt_gettoken_shared(&pg->pg_token);
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
                            p->p_ionice > high)
                                high = p->p_ionice;
                }
                lwkt_reltoken(&pg->pg_token);
                pgrel(pg);
                break;
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curtd->td_ucred->cr_uid;
                info.high = high;
                info.who = uap->who;
                allproc_scan(ioprio_get_callback, &info, 0);
                high = info.high;
                break;
        default:
                error = EINVAL;
                goto done;
        }
        if (high == IOPRIO_MIN-2) {
                error = ESRCH;
                goto done;
        }
        uap->sysmsg_result = high;
        error = 0;
done:
        return (error);
}
/*
 * Figure out the current highest i/o priority for processes owned
 * by the specified user.
 */
static
int
ioprio_get_callback(struct proc *p, void *data)
{
        struct ioprio_get_info *info = data;

        lwkt_gettoken_shared(&p->p_token);
        if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_ionice > info->high) {
                info->high = p->p_ionice;
        }
        lwkt_reltoken(&p->p_token);
        return(0);
}
struct ioprio_set_info {
        int     prio;
        int     who;
        int     error;
        int     found;
};

static int ioprio_set_callback(struct proc *p, void *data);
/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_set(struct ioprio_set_args *uap)
{
        struct ioprio_set_info info;
        thread_t curtd = curthread;
        struct proc *curp = curproc;
        struct proc *p;
        struct pgrp *pg;
        int found = 0, error = 0;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        lwkt_gettoken(&curp->p_token);
                        error = doionice(curp, uap->prio);
                        lwkt_reltoken(&curp->p_token);
                        found++;
                } else {
                        p = pfind(uap->who);
                        if (p) {
                                lwkt_gettoken(&p->p_token);
                                if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
                                        error = doionice(p, uap->prio);
                                        found++;
                                }
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                        }
                }
                break;
        case PRIO_PGRP:
                if (uap->who == 0) {
                        lwkt_gettoken_shared(&curp->p_token);
                        pg = curp->p_pgrp;
                        pgref(pg);
                        lwkt_reltoken(&curp->p_token);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                lwkt_gettoken(&pg->pg_token);
restart:
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pgrp == pg &&
                            PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
                                error = doionice(p, uap->prio);
                                found++;
                        }
                        lwkt_reltoken(&p->p_token);
                        if (p->p_pgrp != pg) {
                                PRELE(p);
                                goto restart;
                        }
                        PRELE(p);
                }
                lwkt_reltoken(&pg->pg_token);
                pgrel(pg);
                break;
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curtd->td_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(ioprio_set_callback, &info, 0);
                error = info.error;
                found = info.found;
                break;
        default:
                error = EINVAL;
                found = 1;
                break;
        }

        if (found == 0)
                error = ESRCH;
        return (error);
}
static
int
ioprio_set_callback(struct proc *p, void *data)
{
        struct ioprio_set_info *info = data;
        int error;

        lwkt_gettoken(&p->p_token);
        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
                error = doionice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        lwkt_reltoken(&p->p_token);
        return(0);
}
static int
doionice(struct proc *chgp, int n)
{
        struct ucred *cr = curthread->td_ucred;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > IOPRIO_MAX)
                n = IOPRIO_MAX;
        if (n < IOPRIO_MIN)
                n = IOPRIO_MIN;
        if (n < chgp->p_ionice &&
            priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
                return (EACCES);
        chgp->p_ionice = n;

        return (0);
}
/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
        struct ucred *cr = curthread->td_ucred;
        struct proc *p;
        struct lwp *lp;
        struct rtprio rtp;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return error;
        if (uap->pid < 0)
                return EINVAL;

        if (uap->pid == 0) {
                p = curproc;
                PHOLD(p);
        } else {
                p = pfind(uap->pid);
        }

        if (p == NULL) {
                error = ESRCH;
                goto done;
        }
        lwkt_gettoken(&p->p_token);

        if (uap->tid < -1) {
                error = EINVAL;
                goto done;
        }
        if (uap->tid == -1) {
                /*
                 * sadly, tid can be 0 so we can't use 0 here
                 * like sys_rtprio()
                 */
                lp = curthread->td_lwp;
        } else {
                lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
                if (lp == NULL) {
                        error = ESRCH;
                        goto done;
                }
        }

        /*
         * Make sure that this lwp is not ripped if any of the following
         * code blocks, e.g. copyout.
         */
        LWPHOLD(lp);
        switch (uap->function) {
        case RTP_LOOKUP:
                error = copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio));
                break;
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        error = EPERM;
                        break;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) { /* XXX */
                                error = EPERM;
                                break;
                        }
                        /* can't set realtime priority */
                        /*
                         * Realtime priority has to be restricted for reasons
                         * which should be obvious.  However, for idle priority,
                         * there is a potential for system deadlock if an
                         * idleprio process gains a lock on a resource that
                         * other processes need (and the idleprio process can't
                         * run due to a CPU-bound normal process).  Fix me!  XXX
                         */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                error = EPERM;
                                break;
                        }
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX) {
                                error = EINVAL;
                        } else {
                                lp->lwp_rtprio = rtp;
                                error = 0;
                        }
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        LWPRELE(lp);

done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}
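/*
 * Illustrative usage (a sketch; the userland wrapper is assumed to take
 * its arguments in the uap order above): mark the calling lwp
 * idle-priority.  Per the checks above, pid 0 selects the current
 * process and tid -1 the current lwp.
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;
 *	if (lwp_rtprio(RTP_SET, 0, -1, &rtp) < 0)
 *		err(1, "lwp_rtprio");
 */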
/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct rtprio_args *uap)
{
        struct ucred *cr = curthread->td_ucred;
        struct proc *p;
        struct lwp *lp;
        struct rtprio rtp;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return (error);

        if (uap->pid == 0) {
                p = curproc;
                PHOLD(p);
        } else {
                p = pfind(uap->pid);
        }

        if (p == NULL) {
                error = ESRCH;
                goto done;
        }
        lwkt_gettoken(&p->p_token);

        /* XXX lwp */
        lp = FIRST_LWP_IN_PROC(p);
        switch (uap->function) {
        case RTP_LOOKUP:
                error = copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio));
                break;
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        error = EPERM;
                        break;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) {
                                error = EPERM;
                                break;
                        }
                        /* can't set realtime priority */
                        /*
                         * Realtime priority has to be restricted for reasons
                         * which should be obvious.  However, for idle priority,
                         * there is a potential for system deadlock if an
                         * idleprio process gains a lock on a resource that
                         * other processes need (and the idleprio process can't
                         * run due to a CPU-bound normal process).  Fix me!  XXX
                         */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                error = EPERM;
                                break;
                        }
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX) {
                                error = EINVAL;
                                break;
                        }
                        lp->lwp_rtprio = rtp;
                        error = 0;
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }

        return (error);
}
int
sys_setrlimit(struct __setrlimit_args *uap)
{
        struct rlimit alim;
        int error;

        error = copyin(uap->rlp, &alim, sizeof(alim));
        if (error)
                return (error);

        error = kern_setrlimit(uap->which, &alim);

        return (error);
}
int
sys_getrlimit(struct __getrlimit_args *uap)
{
        struct rlimit lim;
        int error;

        error = kern_getrlimit(uap->which, &lim);

        if (error == 0)
                error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
        return error;
}
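/*
 * Illustrative pairing of the two syscalls above (a sketch): raise the
 * soft file-size limit to the current hard limit.
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_FSIZE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		(void)setrlimit(RLIMIT_FSIZE, &rl);
 *	}
 */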
/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
        struct thread *td;

        /*
         * Calculate at the statclock level.  YYY if the thread is owned by
         * another cpu we need to forward the request to the other cpu, or
         * have a token to interlock the information in order to avoid racing
         * thread destruction.
         */
        if ((td = lp->lwp_thread) != NULL) {
                crit_enter();
                up->tv_sec = td->td_uticks / 1000000;
                up->tv_usec = td->td_uticks % 1000000;
                sp->tv_sec = td->td_sticks / 1000000;
                sp->tv_usec = td->td_sticks % 1000000;
                crit_exit();
        }
}
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
        struct timeval upt, spt;
        long *rip1, *rip2;
        struct lwp *lp;

        *ru = p->p_ru;

        FOREACH_LWP_IN_PROC(lp, p) {
                calcru(lp, &upt, &spt);
                timevaladd(&ru->ru_utime, &upt);
                timevaladd(&ru->ru_stime, &spt);
                for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
                     rip1 <= &ru->ru_last;
                     rip1++, rip2++)
                        *rip1 += *rip2;
        }
}
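/*
 * The rip1/rip2 walk above depends on the ru_first/ru_last convenience
 * fields bracketing the integral members of struct rusage; ruadd()
 * below uses the same trick.
 */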
/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
        struct proc *p = curproc;
        struct rusage ru;
        struct rusage *rup;
        int error;

        lwkt_gettoken(&p->p_token);

        switch (uap->who) {
        case RUSAGE_SELF:
                rup = &ru;
                calcru_proc(p, rup);
                error = 0;
                break;
        case RUSAGE_CHILDREN:
                rup = &p->p_cru;
                error = 0;
                break;
        default:
                error = EINVAL;
                break;
        }
        lwkt_reltoken(&p->p_token);

        if (error == 0)
                error = copyout(rup, uap->rusage, sizeof(struct rusage));
        return (error);
}
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timevaladd(&ru->ru_utime, &ru2->ru_utime);
        timevaladd(&ru->ru_stime, &ru2->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
        spin_init(&uihash_lock, "uihashinit");
        uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
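/*
 * NOTE: hashinit() sizes the table to a power of two and returns
 *	 (size - 1) through its mask argument, which is why UIHASH()
 *	 can pick a bucket by simply masking the uid.
 */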
/*
 * NOTE: Must be called with uihash_lock held
 */
static struct uidinfo *
uilookup(uid_t uid)
{
        struct uihashhead *uipp;
        struct uidinfo *uip;

        uipp = UIHASH(uid);
        LIST_FOREACH(uip, uipp, ui_hash) {
                if (uip->ui_uid == uid)
                        break;
        }
        return (uip);
}
/*
 * Helper function to create a uid that could not be found.
 * This function will properly deal with races.
 *
 * WARNING! Should only be used by this source file and by the proc0
 *	    creation code.
 */
struct uidinfo *
uicreate(uid_t uid)
{
        struct uidinfo *uip, *tmp;

        /*
         * Allocate space and check for a race
         */
        uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);

        /*
         * Initialize structure and enter it into the hash table
         */
        spin_init(&uip->ui_lock, "uicreate");
        uip->ui_uid = uid;
        uip->ui_ref = 1;        /* we're returning a ref */
        varsymset_init(&uip->ui_varsymset, NULL);
        uip->ui_pcpu = kmalloc(sizeof(*uip->ui_pcpu) * ncpus,
                               M_UIDINFO, M_WAITOK | M_ZERO);

        /*
         * Somebody may have already created the uidinfo for this
         * uid.  If so, return that instead.
         */
        spin_lock(&uihash_lock);
        tmp = uilookup(uid);
        if (tmp != NULL) {
                uihold(tmp);
                spin_unlock(&uihash_lock);

                spin_uninit(&uip->ui_lock);
                varsymset_clean(&uip->ui_varsymset);
                kfree(uip->ui_pcpu, M_UIDINFO);
                kfree(uip, M_UIDINFO);
                uip = tmp;
        } else {
                LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
                spin_unlock(&uihash_lock);
        }
        return (uip);
}
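/*
 * Design note: the new structure is allocated and initialized before
 * uihash_lock is taken, keeping the M_WAITOK kmalloc out of the
 * spinlock section; losing the creation race merely costs freeing the
 * spare structure.
 */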
/*
 * Find the uidinfo for a uid, creating one if necessary
 */
struct uidinfo *
uifind(uid_t uid)
{
        struct uidinfo *uip;
        thread_t td = curthread;

        if (td->td_ucred) {
                uip = td->td_ucred->cr_uidinfo;
                if (uip->ui_uid == uid) {
                        uihold(uip);
                        return uip;
                }
                uip = td->td_ucred->cr_ruidinfo;
                if (uip->ui_uid == uid) {
                        uihold(uip);
                        return uip;
                }
        }

        spin_lock_shared(&uihash_lock);
        uip = uilookup(uid);
        if (uip == NULL) {
                spin_unlock_shared(&uihash_lock);
                uip = uicreate(uid);
        } else {
                uihold(uip);
                spin_unlock_shared(&uihash_lock);
        }
        return (uip);
}
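/*
 * The cr_uidinfo/cr_ruidinfo fast path above avoids uihash_lock
 * entirely in the common case of a thread operating on its own
 * effective or real uid.
 */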
/*
 * Helper function to remove a uidinfo whose reference count may
 * have transitioned to 0.  The reference count is likely 0
 * on-call.
 */
static __inline void
uifree(uid_t uid)
{
        struct uidinfo *uip;

        /*
         * If we are still the only holder after acquiring the uihash_lock
         * we can safely unlink the uip and destroy it.  Otherwise we lost
         * a race and must fail.
         */
        spin_lock(&uihash_lock);
        uip = uilookup(uid);
        if (uip && uip->ui_ref == 0) {
                LIST_REMOVE(uip, ui_hash);
                spin_unlock(&uihash_lock);

                /*
                 * The uip is now orphaned and we can destroy it at our
                 * leisure.
                 */
                if (uip->ui_sbsize != 0)
                        kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
                                uip->ui_uid, (intmax_t)uip->ui_sbsize);
                if (uip->ui_proccnt != 0)
                        kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
                                uip->ui_uid, uip->ui_proccnt);

                varsymset_clean(&uip->ui_varsymset);
                lockuninit(&uip->ui_varsymset.vx_lock);
                spin_uninit(&uip->ui_lock);
                kfree(uip->ui_pcpu, M_UIDINFO);
                kfree(uip, M_UIDINFO);
        } else {
                spin_unlock(&uihash_lock);
        }
}
/*
 * Bump the ref count
 */
void
uihold(struct uidinfo *uip)
{
        KKASSERT(uip->ui_ref >= 0);
        atomic_add_int(&uip->ui_ref, 1);
}
/*
 * Drop the ref count.  The last-drop code still needs to remove the
 * uidinfo from the hash table which it does by re-looking-it-up.
 *
 * NOTE: The uip can be ripped out from under us after the fetchadd.
 */
void
uidrop(struct uidinfo *uip)
{
        uid_t uid;

        KKASSERT(uip->ui_ref > 0);
        uid = uip->ui_uid;
        cpu_ccfence();
        if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
                uifree(uid);
        }
}
void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
        uidrop(*puip);
        *puip = nuip;
}
/*
 * Change the count associated with number of processes
 * a given user is using.
 *
 * NOTE: When 'max' is 0, don't enforce a limit.
 *
 * NOTE: Due to concurrency, the count can sometimes exceed the max
 *	 by a small amount.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
        int ret;

        /* don't allow them to exceed max, but allow subtraction */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
                ret = 0;
        } else {
                atomic_add_long(&uip->ui_proccnt, diff);
                if (uip->ui_proccnt < 0)
                        kprintf("negative proccnt for uid = %d\n",
                                uip->ui_uid);
                ret = 1;
        }
        return ret;
}
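/*
 * Typical caller pattern (a sketch of fork-time accounting against a
 * per-uid process limit; the limit variable is assumed from the fork
 * path): a zero return means the limit would be exceeded.
 *
 *	if (!chgproccnt(uip, 1, maxprocperuid))
 *		return (EAGAIN);
 */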
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
        rlim_t new;
        rlim_t sbsize;

        sbsize = atomic_fetchadd_long(&uip->ui_sbsize, to - *hiwat);
        new = sbsize + to - *hiwat;
        KKASSERT(new >= 0);

        /*
         * If we are trying to increase the socket buffer size,
         * scale down the hi water mark when we exceed the user's
         * allowed socket buffer space.
         *
         * We can't scale down too much or we will blow up atomic packet
         * operations.
         */
        if (to > *hiwat && to > MCLBYTES && new > max) {
                to = to * max / new;
                if (to < MCLBYTES)
                        to = MCLBYTES;
        }
        *hiwat = to;
        return (1);
}
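/*
 * Worked example of the scaling above: with max = 1 MB and an aggregate
 * new = 2 MB, a requested hiwat of 256 KB is scaled to
 * 256K * 1M / 2M = 128 KB, and would be floored at MCLBYTES had it
 * scaled below that.
 */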