/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
static int donice(struct proc *chgp, int n);
static int doionice(struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo *uilookup(uid_t uid);
/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int	low;
	int	who;
};

static int getpriority_callback(struct proc *p, void *data);
int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int low = PRIO_MAX + 1;
	int error;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			low = curp->p_nice;
		} else {
			p = pfind(uap->who);
			if (p) {
				lwkt_gettoken_shared(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
					low = p->p_nice;
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (uap->who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken_shared(&pg->pg_token);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curtd->td_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info, 0);
		low = info.low;
		break;
	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	return (error);
}
/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	lwkt_gettoken_shared(&p->p_token);
	if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}
struct setpriority_info {
	int	prio;
	int	who;
	int	error;
	int	found;
};

static int setpriority_callback(struct proc *p, void *data);
int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			lwkt_gettoken(&curp->p_token);
			error = donice(curp, uap->prio);
			found++;
			lwkt_reltoken(&curp->p_token);
		} else {
			p = pfind(uap->who);
			if (p) {
				lwkt_gettoken(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
					error = donice(p, uap->prio);
					found++;
				}
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (uap->who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken(&pg->pg_token);
restart:
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pgrp == pg &&
			    PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
			lwkt_reltoken(&p->p_token);
			if (p->p_pgrp != pg) {
				PRELE(p);
				goto restart;
			}
			PRELE(p);
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curtd->td_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info, 0);
		error = info.error;
		found = info.found;
		break;
	default:
		error = EINVAL;
		found = 1;
		break;
	}

	if (found == 0)
		error = ESRCH;
	return (error);
}
static int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	lwkt_gettoken(&p->p_token);
	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}
/*
 * Caller must hold chgp->p_token
 */
static int
donice(struct proc *chgp, int n)
{
	struct ucred *cr = curthread->td_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp) {
		chgp->p_usched->resetpriority(lp);
	}
	return (0);
}
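
/*
 * Worked example of the checks above (hypothetical values): for an
 * unprivileged caller and a process it owns whose p_nice is 10,
 *
 *	donice(p, 15)	succeeds - raising the nice value is unrestricted
 *	donice(p, 5)	fails with EACCES - 5 < p_nice, and lowering the
 *			nice value requires PRIV_SCHED_SETPRIORITY
 */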
struct ioprio_get_info {
	int	high;
	int	who;
};

static int ioprio_get_callback(struct proc *p, void *data);
int
sys_ioprio_get(struct ioprio_get_args *uap)
{
	struct ioprio_get_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int high = IOPRIO_MIN - 2;
	int error;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			high = curp->p_ionice;
		} else {
			p = pfind(uap->who);
			if (p) {
				lwkt_gettoken_shared(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
					high = p->p_ionice;
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (uap->who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken_shared(&pg->pg_token);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
			    p->p_ionice > high) {
				high = p->p_ionice;
			}
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curtd->td_ucred->cr_uid;
		info.high = high;
		info.who = uap->who;
		allproc_scan(ioprio_get_callback, &info, 0);
		high = info.high;
		break;
	default:
		error = EINVAL;
		goto done;
	}
	if (high == IOPRIO_MIN - 2) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = high;
	error = 0;
done:
	return (error);
}
/*
 * Figure out the current highest i/o priority for processes owned
 * by the specified user.
 */
static int
ioprio_get_callback(struct proc *p, void *data)
{
	struct ioprio_get_info *info = data;

	lwkt_gettoken_shared(&p->p_token);
	if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_ionice > info->high) {
		info->high = p->p_ionice;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}
struct ioprio_set_info {
	int	prio;
	int	who;
	int	error;
	int	found;
};

static int ioprio_set_callback(struct proc *p, void *data);
int
sys_ioprio_set(struct ioprio_set_args *uap)
{
	struct ioprio_set_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			lwkt_gettoken(&curp->p_token);
			error = doionice(curp, uap->prio);
			found++;
			lwkt_reltoken(&curp->p_token);
		} else {
			p = pfind(uap->who);
			if (p) {
				lwkt_gettoken(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
					error = doionice(p, uap->prio);
					found++;
				}
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (uap->who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken(&pg->pg_token);
restart:
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pgrp == pg &&
			    PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
				found++;
			}
			lwkt_reltoken(&p->p_token);
			if (p->p_pgrp != pg) {
				PRELE(p);
				goto restart;
			}
			PRELE(p);
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curtd->td_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(ioprio_set_callback, &info, 0);
		error = info.error;
		found = info.found;
		break;
	default:
		error = EINVAL;
		found = 1;
		break;
	}

	if (found == 0)
		error = ESRCH;
	return (error);
}
static int
ioprio_set_callback(struct proc *p, void *data)
{
	struct ioprio_set_info *info = data;
	int error;

	lwkt_gettoken(&p->p_token);
	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		error = doionice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}
static int
doionice(struct proc *chgp, int n)
{
	struct ucred *cr = curthread->td_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > IOPRIO_MAX)
		n = IOPRIO_MAX;
	if (n < IOPRIO_MIN)
		n = IOPRIO_MIN;
	if (n < chgp->p_ionice &&
	    priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_ionice = n;
	return (0);
}
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct ucred *cr = curthread->td_ucred;
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);
	if (uap->pid < 0)
		return (EINVAL);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}
	if (p == NULL)
		return (ESRCH);
	lwkt_gettoken(&p->p_token);

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	/*
	 * Make sure that this lwp is not ripped if any of the following
	 * code blocks, e.g. copyout.
	 */
	LWPHOLD(lp);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle
			 * priority, there is a potential for system deadlock
			 * if an idleprio process gains a lock on a resource
			 * that other processes need (and the idleprio process
			 * can't run due to a CPU-bound normal process).
			 * Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	LWPRELE(lp);

done:
	lwkt_reltoken(&p->p_token);
	PRELE(p);
	return (error);
}
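
/*
 * Usage sketch (assumed userland wrapper, arguments shown in the order
 * of the args structure; see lwp_rtprio(2)): fetch the rtprio of the
 * calling thread.  A tid of -1 selects the caller's own lwp because
 * tid 0 is a valid thread id and cannot act as a wildcard:
 *
 *	struct rtprio rtp;
 *	lwp_rtprio(RTP_LOOKUP, 0, -1, &rtp);
 */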
/*
 * Set realtime priority
 */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct ucred *cr = curthread->td_ucred;
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}
	if (p == NULL)
		return (ESRCH);
	lwkt_gettoken(&p->p_token);

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
			/*
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle
			 * priority, there is a potential for system deadlock
			 * if an idleprio process gains a lock on a resource
			 * that other processes need (and the idleprio process
			 * can't run due to a CPU-bound normal process).
			 * Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);
	return (error);
}
int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}
int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return (error);
}
/*
 * Transform the running time and tick information in lwp lp's thread
 * into user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a
 * statistical calculation which will be correct over the long haul,
 * but should not be expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
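
/*
 * Worked example of the conversion above: if td_uticks has
 * accumulated 2500000 microseconds of user time, then
 *
 *	up->tv_sec  = 2500000 / 1000000 = 2
 *	up->tv_usec = 2500000 % 1000000 = 500000
 *
 * i.e. 2.5 seconds, accurate only to statclock granularity.
 */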
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++) {
			*rip1 += *rip2;
		}
	}
}
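
/*
 * The pointer walk above depends on <sys/resource.h> aliasing the
 * first and last of the contiguous long counters in struct rusage:
 *
 *	#define ru_first ru_ixrss
 *	#define ru_last  ru_nivcsw
 *
 * so a single loop accumulates every per-lwp counter in that range
 * without naming each field.
 */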
int
sys_getrusage(struct getrusage_args *uap)
{
	struct proc *p = curproc;
	struct rusage ru;
	struct rusage *rup;
	int error;

	lwkt_gettoken(&p->p_token);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(p, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &p->p_cru;
		error = 0;
		break;
	default:
		rup = NULL;
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&p->p_token);

	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	return (error);
}
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
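
/*
 * Typical use (a sketch; the actual call sites live in the exit and
 * wait paths): fold a reaped child's totals into the parent's
 * RUSAGE_CHILDREN accumulator reported by sys_getrusage():
 *
 *	ruadd(&p->p_cru, &child_ru);
 */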
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock, "uihashinit");
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
/*
 * NOTE: Must be called with uihash_lock held
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}
/*
 * Helper function to create a uid that could not be found.
 * This function will properly deal with races.
 *
 * WARNING! Should only be used by this source file and by the proc0
 *	    creation code.
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK|M_ZERO);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock, "uicreate");
	uip->ui_uid = uid;
	uip->ui_ref = 1;	/* we're returning a ref */
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		uihold(tmp);
		spin_unlock(&uihash_lock);

		spin_uninit(&uip->ui_lock);
		varsymset_clean(&uip->ui_varsymset);
		kfree(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		spin_unlock(&uihash_lock);
	}
	return (uip);
}
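
/*
 * Design note: uicreate() allocates and initializes the structure
 * before taking uihash_lock because kmalloc(M_WAITOK) may block, and
 * blocking while holding a spinlock is not allowed.  Losing the race
 * therefore only costs one throwaway allocation.
 */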
/*
 * Find the uidinfo for a uid, creating one if necessary
 */
struct uidinfo *
uifind(uid_t uid)
{
	thread_t td = curthread;
	struct uidinfo *uip;

	uip = td->td_ucred->cr_uidinfo;
	if (uip->ui_uid == uid) {
		uihold(uip);
		return uip;
	}
	uip = td->td_ucred->cr_ruidinfo;
	if (uip->ui_uid == uid) {
		uihold(uip);
		return uip;
	}

	spin_lock_shared(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_shared(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_shared(&uihash_lock);
	}
	return uip;
}
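
/*
 * Minimal lifecycle sketch for callers (illustrative):
 *
 *	struct uidinfo *uip;
 *
 *	uip = uifind(uid);	returns a held reference, creating
 *				the uidinfo on first use
 *	... charge resources against uip ...
 *	uidrop(uip);		last drop unhashes and frees it
 */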
/*
 * Helper function to remove a uidinfo whose reference count may
 * have transitioned to 0.  The reference count is likely 0 on entry
 * but must be re-verified under uihash_lock.
 */
static __inline void
uifree(uid_t uid)
{
	struct uidinfo *uip;

	/*
	 * If we are still the only holder after acquiring the uihash_lock
	 * we can safely unlink the uip and destroy it.  Otherwise we lost
	 * a race and must fail.
	 */
	spin_lock(&uihash_lock);
	uip = uilookup(uid);
	if (uip && uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		spin_unlock(&uihash_lock);

		/*
		 * The uip is now orphaned and we can destroy it at our
		 * leisure.
		 */
		if (uip->ui_sbsize != 0)
			kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);

		varsymset_clean(&uip->ui_varsymset);
		lockuninit(&uip->ui_varsymset.vx_lock);
		spin_uninit(&uip->ui_lock);
		kfree(uip, M_UIDINFO);
	} else {
		spin_unlock(&uihash_lock);
	}
}
/*
 * Bump the ref count
 */
void
uihold(struct uidinfo *uip)
{
	KKASSERT(uip->ui_ref >= 0);
	atomic_add_int(&uip->ui_ref, 1);
}
/*
 * Drop the ref count.  The last-drop code still needs to remove the
 * uidinfo from the hash table which it does by re-looking-it-up.
 *
 * NOTE: The uip can be ripped out from under us after the fetchadd.
 */
void
uidrop(struct uidinfo *uip)
{
	uid_t uid;

	KKASSERT(uip->ui_ref > 0);
	uid = uip->ui_uid;
	cpu_ccfence();
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uid);
	}
}
void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}
/*
 * Change the count associated with number of processes
 * a given user is using.
 *
 * NOTE: When 'max' is 0, don't enforce a limit.
 *
 * NOTE: Due to concurrency, the count can sometimes exceed the max
 *	 by a small amount.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		atomic_add_long(&uip->ui_proccnt, diff);
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	return ret;
}
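
/*
 * Illustrative caller in the style of the fork path (the names here
 * are assumptions, not the actual call site): refuse the fork when
 * the uid is at its process limit; a max of 0 disables enforcement:
 *
 *	if (!chgproccnt(uip, 1, maxprocperuid))
 *		return (EAGAIN);
 */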
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;
	rlim_t sbsize;

	sbsize = atomic_fetchadd_long(&uip->ui_sbsize, to - *hiwat);
	new = sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale
	 * down the hi water mark when we exceed the user's allowed
	 * socket buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	*hiwat = to;
	return (1);
}
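
/*
 * Worked example of the scale-down above: with max = 1000000,
 * new = 2000000 and a requested hi water mark to = 262144,
 *
 *	to = 262144 * 1000000 / 2000000 = 131072
 *
 * i.e. the granted hi water mark is halved because the uid sits at
 * twice its allowed aggregate socket buffer space (floored at
 * MCLBYTES so atomic packet operations still fit).
 */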