/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <vm/vm_page2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");
static int scheduler_notify;

static void swapout (struct proc *);
/*
 * Determine whether the kernel can access the range [addr, addr + len)
 * with the protection given in rw (a VM_PROT_* mask).
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > kernel_map.max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}
/*
 * Determine whether the current process can access the user range
 * [addr, addr + len) with the protection given in rw.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;
	vm_offset_t wrap;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);

	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page(wrap), prot, TRUE);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
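
/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller validates a user buffer with useracc() before copying from it.
 * The helper name example_copyin_checked and its use are hypothetical.
 */
#if 0
static int
example_copyin_checked(void *kbuf, const void *ubuf, size_t len)
{
	if (useracc((c_caddr_t)ubuf, len, VM_PROT_READ) == FALSE)
		return (EFAULT);
	return (copyin(ubuf, kbuf, len));
}
#endif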
/*
 * Wire the user range [addr, addr + len) of the current process into memory.
 */
void
vslock(caddr_t addr, u_int len)
{
	vm_map_wire(&curproc->p_vmspace->vm_map,
		    trunc_page((vm_offset_t)addr),
		    round_page((vm_offset_t)addr + len), 0);
}
/*
 * Undo a previous vslock(): unwire the range [addr, addr + len).
 */
void
vsunlock(caddr_t addr, u_int len)
{
	vm_map_wire(&curproc->p_vmspace->vm_map,
		    trunc_page((vm_offset_t)addr),
		    round_page((vm_offset_t)addr + len),
		    KM_PAUSE);
}
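
/*
 * Illustrative sketch only (not part of the original file): vslock() and
 * vsunlock() bracket a user buffer so it stays resident while the kernel
 * operates on it directly.  The helper name example_wired_io is hypothetical.
 */
#if 0
static void
example_wired_io(caddr_t ubuf, u_int len)
{
	vslock(ubuf, len);
	/* ... perform I/O directly on the wired user buffer ... */
	vsunlock(ubuf, len);
}
#endif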
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_sysref.refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}
}
/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
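
/*
 * Worked example (illustrative, assumes 4KB pages): the max(..., 512) clamp
 * above means the initial RLIMIT_RSS soft limit is never below
 * ptoa(512) = 512 * 4096 bytes = 2MB, matching the comment.  On a machine
 * with 200000 free pages the soft limit would instead start at roughly
 * 200000 * 4096 bytes (~780MB).
 */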
/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * blocked waiting for the flag to clear.
 */
void
faultin(struct proc *p)
{
	if (p->p_flags & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
		wakeup(p);
		lwkt_reltoken(&p->p_token);
	}
}
/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
	struct proc *pp;
	int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;

	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time before looping up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&proc_token);
	faultin(p);
	lwkt_reltoken(&proc_token);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}
/*
 * The caller must hold proc_token.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	if (p->p_flags & P_SWAPWAIT) {
		pri = 0;
		FOREACH_LWP_IN_PROC(lp, p) {
			/* XXX lwp might need a different metric */
			pri += lp->lwp_slptime;
		}
		pri += p->p_swtime - p->p_nice * 8;

		/*
		 * The more pages paged out while we were swapped,
		 * the more work we have to do to get up and running
		 * again and the lower our wakeup priority.
		 *
		 * Each second of sleep time is worth ~1MB
		 */
		lwkt_gettoken(&p->p_vmspace->vm_map.token);
		pgs = vmspace_resident_count(p->p_vmspace);
		if (pgs < p->p_vmspace->vm_swrss) {
			pri -= (p->p_vmspace->vm_swrss - pgs) /
			       (1024 * 1024 / PAGE_SIZE);
		}
		lwkt_reltoken(&p->p_vmspace->vm_map.token);

		/*
		 * If this process is higher priority and there is
		 * enough space, then select this process instead of
		 * the previous selection.
		 */
		if (pri > info->ppri) {
			info->pp = p;
			info->ppri = pri;
		}
	}
	return(0);
}
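
/*
 * Worked example for scheduler_callback() above (illustrative, numbers
 * hypothetical, assumes 4KB pages): a single-lwp process sleeping for 20s,
 * swapped out for 30s, nice 0, starts at pri = 20 + 30 - 0 = 50.  If 2560
 * more pages (10MB) were paged out than are currently resident, the penalty
 * is 10MB / 1MB = 10, giving pri = 40.  The candidate with the highest pri
 * wins the swap-in.
 */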
/*
 * Ask the swapin scheduler to look for another candidate to bring in.
 */
void
swapin_request(void)
{
	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
	}
}
#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	 ((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  Default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "Time (sec) a process can idle before being swapped");
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those proc's pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
static int swapout_procs_callback(struct proc *p, void *data);

void
swapout_procs(int action)
{
	allproc_scan(swapout_procs_callback, &action);
}
/*
 * The caller must hold proc_token
 */
static int
swapout_procs_callback(struct proc *p, void *data)
{
	struct lwp *lp;
	int action = *(int *)data;
	int minslp = -1;

	if (!swappable(p))
		return(0);

	lwkt_gettoken(&p->p_token);

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
		lwkt_reltoken(&p->p_token);
		return(0);
	}

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * do not swap out a realtime process
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		/*
		 * Guarantee swap_idle_threshold time in memory
		 */
		if (lp->lwp_slptime < swap_idle_threshold1) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		/*
		 * If the system is under memory stress, or if we
		 * are swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			lwkt_reltoken(&p->p_token);
			return(0);
		}

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;
	}

	/*
	 * If the process has been asleep for awhile, swap
	 * it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {
		swapout(p);
	}

	/*
	 * cleanup our reference
	 */
	lwkt_reltoken(&p->p_token);
	return(0);
}
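
/*
 * Worked example for the checks above (illustrative, default thresholds):
 * under VM_SWAP_IDLE alone, a process is swapped out only if every one of
 * its lwps has been asleep for more than swap_idle_threshold2 (60s); any
 * lwp that slept less than swap_idle_threshold1 (15s) always protects the
 * process.  Under VM_SWAP_NORMAL the 15-second floor is the only sleep
 * requirement.
 */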
/*
 * The caller must hold proc_token and p->p_token
 */
static void
swapout(struct proc *p)
{
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	p->p_flags |= P_SWAPPEDOUT;
}

#endif /* !NO_SWAPPING */