sys/kern/kern_resource.c
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);
static int doionice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define UIHASH(uid)     (&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;           /* size of hash table - 1 */

static struct uidinfo *uicreate (uid_t uid);
static struct uidinfo *uilookup (uid_t uid);
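/*
 * Note on UIHASH: hashinit() sizes the table to a power of two and
 * returns (size - 1) in uihash, so UIHASH(uid) is a simple mask.  With
 * 64 buckets, for example, uids 1 and 65 land in the same bucket and
 * are distinguished by the ui_uid comparison in uilookup().
 */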
/*
 * Resource controls and accounting.
 */
struct getpriority_info {
        int low;
        int who;
};

static int getpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct getpriority_args *uap)
{
        struct getpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int low = PRIO_MAX + 1;
        int error;

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        p = curp;
                        PHOLD(p);
                } else {
                        p = pfind(uap->who);
                }
                if (p) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                low = p->p_nice;
                        }
                        PRELE(p);
                }
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0) {
                        pg = curp->p_pgrp;
                        pgref(pg);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
                            p->p_nice < low) {
                                low = p->p_nice;
                        }
                }
                pgrel(pg);
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.low = low;
                info.who = uap->who;
                allproc_scan(getpriority_callback, &info);
                low = info.low;
                break;

        default:
                error = EINVAL;
                goto done;
        }
        if (low == PRIO_MAX + 1) {
                error = ESRCH;
                goto done;
        }
        uap->sysmsg_result = low;
        error = 0;
done:
        return (error);
}
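/*
 * Note on the sentinel used above: low starts out as PRIO_MAX + 1,
 * which no process can have as a nice value, so if nothing matched (or
 * nothing passed the prison check) the syscall reports ESRCH instead
 * of returning a bogus priority.
 */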
/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
        struct getpriority_info *info = data;

        if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_nice < info->low) {
                info->low = p->p_nice;
        }
        return(0);
}
struct setpriority_info {
        int prio;
        int who;
        int error;
        int found;
};

static int setpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct setpriority_args *uap)
{
        struct setpriority_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int found = 0, error = 0;

        lwkt_gettoken(&proc_token);

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        p = curp;
                        PHOLD(p);
                } else {
                        p = pfind(uap->who);
                }
                if (p) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = donice(p, uap->prio);
                                found++;
                        }
                        PRELE(p);
                }
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0) {
                        pg = curp->p_pgrp;
                        pgref(pg);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = donice(p, uap->prio);
                                found++;
                        }
                }
                pgrel(pg);
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(setpriority_callback, &info);
                error = info.error;
                found = info.found;
                break;

        default:
                error = EINVAL;
                found = 1;
                break;
        }

        lwkt_reltoken(&proc_token);

        if (found == 0)
                error = ESRCH;
        return (error);
}
static
int
setpriority_callback(struct proc *p, void *data)
{
        struct setpriority_info *info = data;
        int error;

        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
                error = donice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        return(0);
}
static int
donice(struct proc *chgp, int n)
{
        struct proc *curp = curproc;
        struct ucred *cr = curp->p_ucred;
        struct lwp *lp;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
                return (EACCES);
        chgp->p_nice = n;
        FOREACH_LWP_IN_PROC(lp, chgp)
                chgp->p_usched->resetpriority(lp);
        return (0);
}
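/*
 * Note on the permission model in donice() above: an unprivileged
 * caller (non-zero effective and real uid) may only renice processes
 * owned by its own effective or real uid, and lowering the nice value
 * (raising priority) additionally requires PRIV_SCHED_SETPRIORITY.
 */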
struct ioprio_get_info {
        int high;
        int who;
};

static int ioprio_get_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_get(struct ioprio_get_args *uap)
{
        struct ioprio_get_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int high = IOPRIO_MIN-2;
        int error;

        lwkt_gettoken(&proc_token);

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        p = curp;
                        PHOLD(p);
                } else {
                        p = pfind(uap->who);
                }
                if (p) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred))
                                high = p->p_ionice;
                        PRELE(p);
                }
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0) {
                        pg = curp->p_pgrp;
                        pgref(pg);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
                            p->p_ionice > high)
                                high = p->p_ionice;
                }
                pgrel(pg);
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.high = high;
                info.who = uap->who;
                allproc_scan(ioprio_get_callback, &info);
                high = info.high;
                break;

        default:
                error = EINVAL;
                goto done;
        }
        if (high == IOPRIO_MIN-2) {
                error = ESRCH;
                goto done;
        }
        uap->sysmsg_result = high;
        error = 0;
done:
        lwkt_reltoken(&proc_token);

        return (error);
}
/*
 * Figure out the current highest I/O priority for processes owned
 * by the specified user.
 */
static
int
ioprio_get_callback(struct proc *p, void *data)
{
        struct ioprio_get_info *info = data;

        if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
            p->p_ucred->cr_uid == info->who &&
            p->p_ionice > info->high) {
                info->high = p->p_ionice;
        }
        return(0);
}
struct ioprio_set_info {
        int prio;
        int who;
        int error;
        int found;
};

static int ioprio_set_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_set(struct ioprio_set_args *uap)
{
        struct ioprio_set_info info;
        struct proc *curp = curproc;
        struct proc *p;
        int found = 0, error = 0;

        lwkt_gettoken(&proc_token);

        switch (uap->which) {
        case PRIO_PROCESS:
                if (uap->who == 0) {
                        p = curp;
                        PHOLD(p);
                } else {
                        p = pfind(uap->who);
                }
                if (p) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = doionice(p, uap->prio);
                                found++;
                        }
                        PRELE(p);
                }
                break;

        case PRIO_PGRP:
        {
                struct pgrp *pg;

                if (uap->who == 0) {
                        pg = curp->p_pgrp;
                        pgref(pg);
                } else if ((pg = pgfind(uap->who)) == NULL) {
                        break;
                } /* else ref held from pgfind */

                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
                                error = doionice(p, uap->prio);
                                found++;
                        }
                }
                pgrel(pg);
                break;
        }
        case PRIO_USER:
                if (uap->who == 0)
                        uap->who = curp->p_ucred->cr_uid;
                info.prio = uap->prio;
                info.who = uap->who;
                info.error = 0;
                info.found = 0;
                allproc_scan(ioprio_set_callback, &info);
                error = info.error;
                found = info.found;
                break;

        default:
                error = EINVAL;
                found = 1;
                break;
        }

        lwkt_reltoken(&proc_token);

        if (found == 0)
                error = ESRCH;
        return (error);
}
static
int
ioprio_set_callback(struct proc *p, void *data)
{
        struct ioprio_set_info *info = data;
        int error;

        if (p->p_ucred->cr_uid == info->who &&
            PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
                error = doionice(p, info->prio);
                if (error)
                        info->error = error;
                ++info->found;
        }
        return(0);
}
static int
doionice(struct proc *chgp, int n)
{
        struct proc *curp = curproc;
        struct ucred *cr = curp->p_ucred;

        if (cr->cr_uid && cr->cr_ruid &&
            cr->cr_uid != chgp->p_ucred->cr_uid &&
            cr->cr_ruid != chgp->p_ucred->cr_uid)
                return (EPERM);
        if (n > IOPRIO_MAX)
                n = IOPRIO_MAX;
        if (n < IOPRIO_MIN)
                n = IOPRIO_MIN;
        if (n < chgp->p_ionice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
                return (EACCES);
        chgp->p_ionice = n;

        return (0);
}
/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
        struct proc *p;
        struct lwp *lp;
        struct rtprio rtp;
        struct ucred *cr = curthread->td_ucred;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return error;
        if (uap->pid < 0)
                return EINVAL;

        lwkt_gettoken(&proc_token);

        if (uap->pid == 0) {
                p = curproc;
                PHOLD(p);
        } else {
                p = pfind(uap->pid);
        }

        if (p == NULL) {
                error = ESRCH;
                goto done;
        }

        if (uap->tid < -1) {
                error = EINVAL;
                goto done;
        }
        if (uap->tid == -1) {
                /*
                 * sadly, tid can be 0 so we can't use 0 here
                 * like sys_rtprio()
                 */
                lp = curthread->td_lwp;
        } else {
                lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
                if (lp == NULL) {
                        error = ESRCH;
                        goto done;
                }
        }

        switch (uap->function) {
        case RTP_LOOKUP:
                error = copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio));
                break;
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        error = EPERM;
                        break;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) { /* XXX */
                                error = EPERM;
                                break;
                        }
                        /* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                error = EPERM;
                                break;
                        }
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX) {
                                error = EINVAL;
                        } else {
                                lp->lwp_rtprio = rtp;
                                error = 0;
                        }
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        default:
                error = EINVAL;
                break;
        }

done:
        if (p)
                PRELE(p);
        lwkt_reltoken(&proc_token);

        return (error);
}
/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct rtprio_args *uap)
{
        struct proc *p;
        struct lwp *lp;
        struct ucred *cr = curthread->td_ucred;
        struct rtprio rtp;
        int error;

        error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
        if (error)
                return (error);

        lwkt_gettoken(&proc_token);

        if (uap->pid == 0) {
                p = curproc;
                PHOLD(p);
        } else {
                p = pfind(uap->pid);
        }

        if (p == NULL) {
                error = ESRCH;
                goto done;
        }

        /* XXX lwp */
        lp = FIRST_LWP_IN_PROC(p);
        switch (uap->function) {
        case RTP_LOOKUP:
                error = copyout(&lp->lwp_rtprio, uap->rtp,
                                sizeof(struct rtprio));
                break;
        case RTP_SET:
                if (cr->cr_uid && cr->cr_ruid &&
                    cr->cr_uid != p->p_ucred->cr_uid &&
                    cr->cr_ruid != p->p_ucred->cr_uid) {
                        error = EPERM;
                        break;
                }
                /* disallow setting rtprio in most cases if not superuser */
                if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
                        /* can't set someone else's */
                        if (uap->pid) {
                                error = EPERM;
                                break;
                        }
                        /* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
                        if (RTP_PRIO_IS_REALTIME(rtp.type)) {
                                error = EPERM;
                                break;
                        }
                }
                switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
                case RTP_PRIO_FIFO:
#endif
                case RTP_PRIO_REALTIME:
                case RTP_PRIO_NORMAL:
                case RTP_PRIO_IDLE:
                        if (rtp.prio > RTP_PRIO_MAX) {
                                error = EINVAL;
                                break;
                        }
                        lp->lwp_rtprio = rtp;
                        error = 0;
                        break;
                default:
                        error = EINVAL;
                        break;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
done:
        if (p)
                PRELE(p);
        lwkt_reltoken(&proc_token);

        return (error);
}
/*
 * MPSAFE
 */
int
sys_setrlimit(struct __setrlimit_args *uap)
{
        struct rlimit alim;
        int error;

        error = copyin(uap->rlp, &alim, sizeof(alim));
        if (error)
                return (error);

        error = kern_setrlimit(uap->which, &alim);

        return (error);
}
/*
 * MPSAFE
 */
int
sys_getrlimit(struct __getrlimit_args *uap)
{
        struct rlimit lim;
        int error;

        error = kern_getrlimit(uap->which, &lim);

        if (error == 0)
                error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
        return error;
}
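/*
 * Illustrative userland use of the two syscalls above (hypothetical
 * values, not part of this file):
 *
 *	struct rlimit rl = { .rlim_cur = 1024, .rlim_max = 4096 };
 *	if (setrlimit(RLIMIT_NOFILE, &rl) == 0)
 *		getrlimit(RLIMIT_NOFILE, &rl);	// rl echoes the new limits
 */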
/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
        struct thread *td;

        /*
         * Calculate at the statclock level.  YYY if the thread is owned by
         * another cpu we need to forward the request to the other cpu, or
         * have a token to interlock the information in order to avoid racing
         * thread destruction.
         */
        if ((td = lp->lwp_thread) != NULL) {
                crit_enter();
                up->tv_sec = td->td_uticks / 1000000;
                up->tv_usec = td->td_uticks % 1000000;
                sp->tv_sec = td->td_sticks / 1000000;
                sp->tv_usec = td->td_sticks % 1000000;
                crit_exit();
        }
}
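/*
 * The division/modulus split above assumes td_uticks and td_sticks
 * accumulate in microseconds: e.g. td_uticks == 2500000 yields
 * tv_sec = 2 and tv_usec = 500000.
 */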
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
        struct timeval upt, spt;
        long *rip1, *rip2;
        struct lwp *lp;

        *ru = p->p_ru;

        FOREACH_LWP_IN_PROC(lp, p) {
                calcru(lp, &upt, &spt);
                timevaladd(&ru->ru_utime, &upt);
                timevaladd(&ru->ru_stime, &spt);
                for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
                     rip1 <= &ru->ru_last;
                     rip1++, rip2++)
                        *rip1 += *rip2;
        }
}
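/*
 * The pointer walk above relies on struct rusage laying out its long
 * counter fields contiguously: ru_first and ru_last (macros in
 * <sys/resource.h>) name the first and last of those fields, so the
 * loop sums every per-lwp counter into the aggregate in one pass.
 */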
/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
        struct rusage ru;
        struct rusage *rup;
        int error;

        lwkt_gettoken(&proc_token);

        switch (uap->who) {
        case RUSAGE_SELF:
                rup = &ru;
                calcru_proc(curproc, rup);
                error = 0;
                break;
        case RUSAGE_CHILDREN:
                rup = &curproc->p_cru;
                error = 0;
                break;
        default:
                error = EINVAL;
                break;
        }
        if (error == 0)
                error = copyout(rup, uap->rusage, sizeof(struct rusage));
        lwkt_reltoken(&proc_token);
        return (error);
}
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timevaladd(&ru->ru_utime, &ru2->ru_utime);
        timevaladd(&ru->ru_stime, &ru2->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
        spin_init(&uihash_lock);
        uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
/*
 * NOTE: Must be called with uihash_lock held
 *
 * MPSAFE
 */
static struct uidinfo *
uilookup(uid_t uid)
{
        struct uihashhead *uipp;
        struct uidinfo *uip;

        uipp = UIHASH(uid);
        LIST_FOREACH(uip, uipp, ui_hash) {
                if (uip->ui_uid == uid)
                        break;
        }
        return (uip);
}
/*
 * Helper function to create a uid that could not be found.
 * This function will properly deal with races.
 *
 * MPSAFE
 */
static struct uidinfo *
uicreate(uid_t uid)
{
        struct uidinfo *uip, *tmp;

        /*
         * Allocate space and check for a race
         */
        uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK|M_ZERO);

        /*
         * Initialize structure and enter it into the hash table
         */
        spin_init(&uip->ui_lock);
        uip->ui_uid = uid;
        uip->ui_ref = 1;        /* we're returning a ref */
        varsymset_init(&uip->ui_varsymset, NULL);

        /*
         * Somebody may have already created the uidinfo for this
         * uid. If so, return that instead.
         */
        spin_lock(&uihash_lock);
        tmp = uilookup(uid);
        if (tmp != NULL) {
                uihold(tmp);
                spin_unlock(&uihash_lock);

                spin_uninit(&uip->ui_lock);
                varsymset_clean(&uip->ui_varsymset);
                FREE(uip, M_UIDINFO);
                uip = tmp;
        } else {
                LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
                spin_unlock(&uihash_lock);
        }
        return (uip);
}
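/*
 * uicreate() above follows the usual optimistic-allocation pattern:
 * allocate and initialize without holding uihash_lock, then recheck
 * under the lock and discard our copy if another thread won the race.
 * This keeps the blocking kmalloc() out of the spinlock section.
 */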
/*
 * MPSAFE
 */
struct uidinfo *
uifind(uid_t uid)
{
        struct uidinfo *uip;

        spin_lock(&uihash_lock);
        uip = uilookup(uid);
        if (uip == NULL) {
                spin_unlock(&uihash_lock);
                uip = uicreate(uid);
        } else {
                uihold(uip);
                spin_unlock(&uihash_lock);
        }
        return (uip);
}
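/*
 * Sketch of the caller contract: uifind() always returns a referenced
 * uidinfo which must eventually be released with uidrop(), e.g. in a
 * hypothetical caller:
 *
 *	uip = uifind(cred->cr_uid);
 *	...use uip, e.g. chgproccnt(uip, 1, limit)...
 *	uidrop(uip);
 */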
/*
 * Helper function to remove a uidinfo whose reference count is
 * transitioning from 1->0.  The reference count is 1 on call.
 *
 * Zero is returned on success, otherwise non-zero and the
 * uip has not been removed.
 *
 * MPSAFE
 */
static __inline int
uifree(struct uidinfo *uip)
{
        /*
         * If we are still the only holder after acquiring the uihash_lock
         * we can safely unlink the uip and destroy it.  Otherwise we lost
         * a race and must fail.
         */
        spin_lock(&uihash_lock);
        if (uip->ui_ref != 1) {
                spin_unlock(&uihash_lock);
                return(-1);
        }
        LIST_REMOVE(uip, ui_hash);
        spin_unlock(&uihash_lock);

        /*
         * The uip is now orphaned and we can destroy it at our
         * leisure.
         */
        if (uip->ui_sbsize != 0)
                kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
                        uip->ui_uid, (intmax_t)uip->ui_sbsize);
        if (uip->ui_proccnt != 0)
                kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
                        uip->ui_uid, uip->ui_proccnt);

        varsymset_clean(&uip->ui_varsymset);
        lockuninit(&uip->ui_varsymset.vx_lock);
        spin_uninit(&uip->ui_lock);
        FREE(uip, M_UIDINFO);
        return(0);
}
/*
 * MPSAFE
 */
void
uihold(struct uidinfo *uip)
{
        atomic_add_int(&uip->ui_ref, 1);
        KKASSERT(uip->ui_ref >= 0);
}
/*
 * NOTE: It is important for us to not drop the ref count to 0
 *	 because this can cause a 2->0/2->0 race with another
 *	 concurrent dropper.  Losing the race in that situation
 *	 can cause uip to become stale for one of the other
 *	 threads.
 *
 * MPSAFE
 */
void
uidrop(struct uidinfo *uip)
{
        int ref;

        KKASSERT(uip->ui_ref > 0);

        for (;;) {
                ref = uip->ui_ref;
                cpu_ccfence();
                if (ref == 1) {
                        if (uifree(uip) == 0)
                                break;
                } else if (atomic_cmpset_int(&uip->ui_ref, ref, ref - 1)) {
                        break;
                }
                /* else retry */
        }
}
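/*
 * Note that the loop above never decrements the count 1->0 directly:
 * a ref of 1 is routed to uifree(), which revalidates under
 * uihash_lock, so the 2->0/2->0 race described in the NOTE cannot
 * leave another thread holding a stale uip.
 */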
void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
        uidrop(*puip);
        *puip = nuip;
}
/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
        int ret;

        spin_lock(&uip->ui_lock);
        /* don't allow them to exceed max, but allow subtraction */
        if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
                ret = 0;
        } else {
                uip->ui_proccnt += diff;
                if (uip->ui_proccnt < 0)
                        kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
                ret = 1;
        }
        spin_unlock(&uip->ui_lock);
        return ret;
}
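/*
 * chgproccnt() returns 1 if the count was adjusted and 0 if the
 * increase would push the user past 'max'; decrements are always
 * allowed so accounting can be unwound.
 */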
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
        rlim_t new;

        spin_lock(&uip->ui_lock);
        new = uip->ui_sbsize + to - *hiwat;
        KKASSERT(new >= 0);

        /*
         * If we are trying to increase the socket buffer size, scale down
         * the hi water mark when we exceed the user's allowed socket buffer
         * space.
         *
         * We can't scale down too much or we will blow up atomic packet
         * operations.
         */
        if (to > *hiwat && to > MCLBYTES && new > max) {
                to = to * max / new;
                if (to < MCLBYTES)
                        to = MCLBYTES;
        }
        uip->ui_sbsize = new;
        *hiwat = to;
        spin_unlock(&uip->ui_lock);
        return (1);
}
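/*
 * Worked example for the scaling above (illustrative numbers): if the
 * user's total would be new = 200KB against max = 100KB, a requested
 * hiwat of to = 64KB is scaled to 64KB * 100/200 = 32KB, and never
 * below MCLBYTES so atomic packet operations still fit.
 */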