/*
 * Copyright (c) 2006,2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/file.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/kern_syscall.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/pmap.h>

#include <sys/spinlock2.h>
85 static MALLOC_DEFINE(M_PLIMIT
, "plimit", "resource limits");
87 static void plimit_copy(struct plimit
*olimit
, struct plimit
*nlimit
);
90 * Initialize proc0's plimit structure. All later plimit structures
91 * are inherited through fork.
94 plimit_init0(struct plimit
*limit
)
99 for (i
= 0; i
< RLIM_NLIMITS
; ++i
) {
100 limit
->pl_rlimit
[i
].rlim_cur
= RLIM_INFINITY
;
101 limit
->pl_rlimit
[i
].rlim_max
= RLIM_INFINITY
;
103 limit
->pl_rlimit
[RLIMIT_NOFILE
].rlim_cur
= maxfiles
;
104 limit
->pl_rlimit
[RLIMIT_NOFILE
].rlim_max
= maxfiles
;
105 limit
->pl_rlimit
[RLIMIT_NPROC
].rlim_cur
= maxproc
;
106 limit
->pl_rlimit
[RLIMIT_NPROC
].rlim_max
= maxproc
;
107 lim
= ptoa((rlim_t
)vmstats
.v_free_count
);
108 limit
->pl_rlimit
[RLIMIT_RSS
].rlim_max
= lim
;
109 limit
->pl_rlimit
[RLIMIT_MEMLOCK
].rlim_max
= lim
;
110 limit
->pl_rlimit
[RLIMIT_MEMLOCK
].rlim_cur
= lim
/ 3;
111 limit
->p_cpulimit
= RLIM_INFINITY
;
113 spin_init(&limit
->p_spin
, "plimitinit");
117 * Return a plimit for use by a new forked process given the one
118 * contained in the parent process.
121 plimit_fork(struct proc
*p1
)
123 struct plimit
*olimit
= p1
->p_limit
;
124 struct plimit
*nlimit
;
128 * Try to share the parent's plimit structure. If we cannot, make
131 * NOTE: (count) value is field prior to increment.
133 count
= atomic_fetchadd_int(&olimit
->p_refcnt
, 1);
135 if (count
& PLIMITF_EXCLUSIVE
) {
136 if ((count
& PLIMITF_MASK
) == 1 && p1
->p_nthreads
== 1) {
137 atomic_clear_int(&olimit
->p_refcnt
, PLIMITF_EXCLUSIVE
);
139 nlimit
= kmalloc(sizeof(*nlimit
), M_PLIMIT
, M_WAITOK
);
140 plimit_copy(olimit
, nlimit
);
148 * This routine is called when a new LWP is created for a process. We
149 * must force exclusivity to ensure that p->p_limit remains stable.
151 * LWPs share the same process structure so this does not bump refcnt.
154 plimit_lwp_fork(struct proc
*p
)
156 struct plimit
*olimit
= p
->p_limit
;
157 struct plimit
*nlimit
;
160 count
= olimit
->p_refcnt
;
162 if ((count
& PLIMITF_EXCLUSIVE
) == 0) {
164 nlimit
= kmalloc(sizeof(*nlimit
), M_PLIMIT
, M_WAITOK
);
165 plimit_copy(olimit
, nlimit
);
170 atomic_set_int(&olimit
->p_refcnt
, PLIMITF_EXCLUSIVE
);
175 * This routine is called to fixup a process's p_limit structure prior
176 * to it being modified. If index >= 0 the specified modification is also
179 * This routine must make the limit structure exclusive. If we are threaded,
180 * the structure will already be exclusive. A later fork will convert it
181 * back to copy-on-write if possible.
183 * We can count on p->p_limit being stable since if we had created any
184 * threads it will have already been made exclusive.
187 plimit_modify(struct proc
*p
, int index
, struct rlimit
*rlim
)
189 struct plimit
*olimit
;
190 struct plimit
*nlimit
;
197 count
= olimit
->p_refcnt
;
199 if ((count
& PLIMITF_EXCLUSIVE
) == 0) {
201 nlimit
= kmalloc(sizeof(*nlimit
), M_PLIMIT
, M_WAITOK
);
202 plimit_copy(olimit
, nlimit
);
207 atomic_set_int(&olimit
->p_refcnt
, PLIMITF_EXCLUSIVE
);
214 if (p
->p_nthreads
== 1) {
215 p
->p_limit
->pl_rlimit
[index
] = *rlim
;
217 spin_lock(&olimit
->p_spin
);
218 p
->p_limit
->pl_rlimit
[index
].rlim_cur
= rlim
->rlim_cur
;
219 p
->p_limit
->pl_rlimit
[index
].rlim_max
= rlim
->rlim_max
;
220 spin_unlock(&olimit
->p_spin
);
226 * Destroy a process's plimit structure.
229 plimit_free(struct plimit
*limit
)
233 count
= atomic_fetchadd_int(&limit
->p_refcnt
, -1);
235 if ((count
& ~PLIMITF_EXCLUSIVE
) == 1) {
236 limit
->p_refcnt
= -999;
237 kfree(limit
, M_PLIMIT
);
242 * Modify a resource limit (from system call)
245 kern_setrlimit(u_int which
, struct rlimit
*limp
)
247 struct proc
*p
= curproc
;
248 struct plimit
*limit
;
249 struct rlimit
*alimp
;
252 if (which
>= RLIM_NLIMITS
)
256 * We will be modifying a resource, make a copy if necessary.
258 plimit_modify(p
, -1, NULL
);
260 alimp
= &limit
->pl_rlimit
[which
];
263 * Preserve historical bugs by treating negative limits as unsigned.
265 if (limp
->rlim_cur
< 0)
266 limp
->rlim_cur
= RLIM_INFINITY
;
267 if (limp
->rlim_max
< 0)
268 limp
->rlim_max
= RLIM_INFINITY
;
270 spin_lock(&limit
->p_spin
);
271 if (limp
->rlim_cur
> alimp
->rlim_max
||
272 limp
->rlim_max
> alimp
->rlim_max
) {
273 spin_unlock(&limit
->p_spin
);
274 error
= priv_check_cred(p
->p_ucred
, PRIV_PROC_SETRLIMIT
, 0);
278 spin_unlock(&limit
->p_spin
);
280 if (limp
->rlim_cur
> limp
->rlim_max
)
281 limp
->rlim_cur
= limp
->rlim_max
;
285 spin_lock(&limit
->p_spin
);
286 if (limp
->rlim_cur
> RLIM_INFINITY
/ (rlim_t
)1000000)
287 limit
->p_cpulimit
= RLIM_INFINITY
;
289 limit
->p_cpulimit
= (rlim_t
)1000000 * limp
->rlim_cur
;
290 spin_unlock(&limit
->p_spin
);
293 if (limp
->rlim_cur
> maxdsiz
)
294 limp
->rlim_cur
= maxdsiz
;
295 if (limp
->rlim_max
> maxdsiz
)
296 limp
->rlim_max
= maxdsiz
;
300 if (limp
->rlim_cur
> maxssiz
)
301 limp
->rlim_cur
= maxssiz
;
302 if (limp
->rlim_max
> maxssiz
)
303 limp
->rlim_max
= maxssiz
;
305 * Stack is allocated to the max at exec time with only
306 * "rlim_cur" bytes accessible. If stack limit is going
307 * up make more accessible, if going down make inaccessible.
309 spin_lock(&limit
->p_spin
);
310 if (limp
->rlim_cur
!= alimp
->rlim_cur
) {
315 if (limp
->rlim_cur
> alimp
->rlim_cur
) {
317 size
= limp
->rlim_cur
- alimp
->rlim_cur
;
318 addr
= USRSTACK
- limp
->rlim_cur
;
321 size
= alimp
->rlim_cur
- limp
->rlim_cur
;
322 addr
= USRSTACK
- alimp
->rlim_cur
;
324 spin_unlock(&limit
->p_spin
);
325 addr
= trunc_page(addr
);
326 size
= round_page(size
);
327 vm_map_protect(&p
->p_vmspace
->vm_map
,
328 addr
, addr
+size
, prot
, FALSE
);
330 spin_unlock(&limit
->p_spin
);
335 if (limp
->rlim_cur
> maxfilesperproc
)
336 limp
->rlim_cur
= maxfilesperproc
;
337 if (limp
->rlim_max
> maxfilesperproc
)
338 limp
->rlim_max
= maxfilesperproc
;
342 if (limp
->rlim_cur
> maxprocperuid
)
343 limp
->rlim_cur
= maxprocperuid
;
344 if (limp
->rlim_max
> maxprocperuid
)
345 limp
->rlim_max
= maxprocperuid
;
346 if (limp
->rlim_cur
< 1)
348 if (limp
->rlim_max
< 1)
351 case RLIMIT_POSIXLOCKS
:
352 if (limp
->rlim_cur
> maxposixlocksperuid
)
353 limp
->rlim_cur
= maxposixlocksperuid
;
354 if (limp
->rlim_max
> maxposixlocksperuid
)
355 limp
->rlim_max
= maxposixlocksperuid
;
358 spin_lock(&limit
->p_spin
);
360 spin_unlock(&limit
->p_spin
);
365 * The rlimit indexed by which is returned in the second argument.
368 kern_getrlimit(u_int which
, struct rlimit
*limp
)
370 struct proc
*p
= curproc
;
371 struct plimit
*limit
;
374 * p is NULL when kern_getrlimit is called from a
375 * kernel thread. In this case as the calling proc
376 * isn't available we just skip the limit check.
381 if (which
>= RLIM_NLIMITS
)
385 *limp
= p
->p_rlimit
[which
];
391 * Determine if the cpu limit has been reached and return an operations
392 * code for the caller to perform.
395 plimit_testcpulimit(struct plimit
*limit
, u_int64_t ttime
)
401 * Initial tests without the spinlock. This is the fast path.
402 * Any 32/64 bit glitches will fall through and retest with
405 if (limit
->p_cpulimit
== RLIM_INFINITY
)
406 return(PLIMIT_TESTCPU_OK
);
407 if (ttime
<= limit
->p_cpulimit
)
408 return(PLIMIT_TESTCPU_OK
);
410 if (ttime
> limit
->p_cpulimit
) {
411 rlim
= &limit
->pl_rlimit
[RLIMIT_CPU
];
412 if (ttime
/ (rlim_t
)1000000 >= rlim
->rlim_max
+ 5)
413 mode
= PLIMIT_TESTCPU_KILL
;
415 mode
= PLIMIT_TESTCPU_XCPU
;
417 mode
= PLIMIT_TESTCPU_OK
;
424 * Helper routine to copy olimit to nlimit and initialize nlimit for
425 * use. nlimit's reference count will be set to 1 and its exclusive bit
430 plimit_copy(struct plimit
*olimit
, struct plimit
*nlimit
)
434 spin_init(&nlimit
->p_spin
, "plimitcopy");
435 nlimit
->p_refcnt
= 1;
/*
 * NOTE(review): the remainder of this function (including its return
 * statement) lies beyond the end of this extraction and the visible
 * text is badly garbled; documented only, code left byte-identical.
 * The visible logic reads the current soft limit for resource (i) and,
 * for RLIMIT_NPROC, downscales it by a depth-derived factor capped via
 * the /320 divisor.  The source of (depth) is not visible here —
 * presumably the process's fork/chroot depth; confirm upstream.
 */
439 * This routine returns the value of a resource, downscaled based on
440 * the processes fork depth and chroot depth (up to 50%). This mechanism
441 * is designed to prevent run-aways from blowing up unrelated processes
442 * running under the same UID.
444 * NOTE: Currently only applicable to RLIMIT_NPROC. We could also limit
445 * file descriptors but we shouldn't have to as these are allocated
449 plimit_getadjvalue(int i
)
451 struct proc
*p
= curproc
;
452 struct plimit
*limit
;
457 v
= limit
->pl_rlimit
[i
].rlim_cur
;
458 if (i
== RLIMIT_NPROC
) {
460 * 10% per chroot (around 1/3% per fork depth), with a
461 * maximum of 50% downscaling of the resource limit.
466 v
-= v
* depth
/ 320;