/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>
#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <vm/vm_page2.h>
#include <sys/thread2.h>

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);

#ifdef INVARIANTS

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
        CTLFLAG_RW, &swap_debug, 0, "");

#endif
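
/*
 * Wakeup flag for the swapin scheduler thread: swapin_request() sets it
 * and issues a wakeup(), and the scheduler loop below clears it and
 * sleeps on it when there is no work.
 */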
static int scheduler_notify;

static void swapout (struct proc *);
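
/*
 * kernacc() and useracc() check whether a range of kernel or user
 * addresses may be accessed with the requested protection, returning
 * boolean TRUE or FALSE.
 */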

/*
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & (~VM_PROT_ALL)) == 0,
            ("illegal ``rw'' argument to kernacc (%x)", rw));

        /*
         * The globaldata space is not part of the kernel_map proper,
         * check access separately.
         */
        if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
                return (TRUE);

        /*
         * Nominal kernel memory access - check access via kernel_map.
         */
        if ((vm_offset_t)addr + len > kernel_map.max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

        return (rv == TRUE);
}

/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;
        vm_offset_t wrap;
        vm_offset_t gpa;

        KASSERT((rw & (~VM_PROT_ALL)) == 0,
            ("illegal ``rw'' argument to useracc (%x)", rw));
        prot = rw;

        if (curthread->td_vmm) {
                if (vmm_vm_get_gpa(curproc, (register_t *)&gpa, (register_t) addr))
                        panic("%s: could not get GPA\n", __func__);
                addr = (c_caddr_t) gpa;
        }

        /*
         * XXX - check separately to disallow access to user area and user
         * page tables - they are in the map.
         */
        wrap = (vm_offset_t)addr + len;
        if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
                return (FALSE);
        }
        map = &curproc->p_vmspace->vm_map;
        vm_map_lock_read(map);

        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
                                     round_page(wrap), prot, TRUE);
        vm_map_unlock_read(map);

        return (rv == TRUE);
}

/*
 * No requirements.
 */
void
vslock(caddr_t addr, u_int len)
{
        if (len) {
                vm_map_wire(&curproc->p_vmspace->vm_map,
                            trunc_page((vm_offset_t)addr),
                            round_page((vm_offset_t)addr + len), 0);
        }
}

/*
 * No requirements.
 */
void
vsunlock(caddr_t addr, u_int len)
{
        if (len) {
                vm_map_wire(&curproc->p_vmspace->vm_map,
                            trunc_page((vm_offset_t)addr),
                            round_page((vm_offset_t)addr + len),
                            KM_PAGEABLE);
        }
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared, essentially
                 * this changes shared memory amongst threads, into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (vmspace_getrefs(p1->p_vmspace) > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
                return;
        }

        if (flags & RFMEM) {
                vmspace_ref(p1->p_vmspace);
                p2->p_vmspace = p1->p_vmspace;
        }

        while (vm_page_count_severe()) {
                vm_wait(0);
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);

                pmap_pinit2(vmspace_pmap(p2->p_vmspace));

                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        pmap_init_proc(p2);
}

/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(vmstats.v_free_count, 512);
        p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * sleeping.
 *
 * No requirements.
 */
void
faultin(struct proc *p)
{
        if (p->p_flags & P_SWAPPEDOUT) {
                /*
                 * The process is waiting in the kernel to return to user
                 * mode but cannot until P_SWAPPEDOUT gets cleared.
                 */
                lwkt_gettoken(&p->p_token);
                p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
#ifdef INVARIANTS
                if (swap_debug)
                        kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
#endif
                wakeup(p);
                lwkt_reltoken(&p->p_token);
        }
}

/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
        struct proc *pp;
        int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
        struct scheduler_info info;
        struct proc *p;

        KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
        scheduler_notify = 0;

        /*
         * Don't try to swap anything in if we are low on memory.
         */
        if (vm_page_count_severe()) {
                vm_wait(0);
                goto loop;
        }

        /*
         * Look for a good candidate to wake up
         *
         * XXX we should make the schedule thread pcpu and then use a
         * segmented allproc scan.
         */
        info.pp = NULL;
        info.ppri = INT_MIN;
        allproc_scan(scheduler_callback, &info, 0);

        /*
         * Nothing to do, back to sleep for at least 1/10 of a second.  If
         * we are woken up, immediately process the next request.  If
         * multiple requests have built up the first is processed
         * immediately and the rest are staggered.
         */
        if ((p = info.pp) == NULL) {
                tsleep(&proc0, 0, "nowork", hz / 10);
                if (scheduler_notify == 0)
                        tsleep(&scheduler_notify, 0, "nowork", 0);
                goto loop;
        }

        /*
         * Fault the selected process in, then wait for a short period of
         * time and loop up.
         *
         * XXX we need a heuristic to get a measure of system stress and
         * then adjust our stagger wakeup delay accordingly.
         */
        lwkt_gettoken(&p->p_token);
        faultin(p);
        p->p_swtime = 0;
        lwkt_reltoken(&p->p_token);
        PRELE(p);
        tsleep(&proc0, 0, "swapin", hz / 10);
        goto loop;
}

/*
 * Process only has its hold count bumped, we need the token
 * to safely scan the LWPs
 */
static int
scheduler_callback(struct proc *p, void *data)
{
        struct scheduler_info *info = data;
        struct vmspace *vm;
        struct lwp *lp;
        segsz_t pgs;
        int pri;

        /*
         * We only care about processes in swap-wait.  Interlock test with
         * token if the flag is found set.
         */
        if ((p->p_flags & P_SWAPWAIT) == 0)
                return 0;
        lwkt_gettoken_shared(&p->p_token);
        if ((p->p_flags & P_SWAPWAIT) == 0) {
                lwkt_reltoken(&p->p_token);
                return 0;
        }

        /*
         * Calculate priority for swap-in
         */
        pri = 0;
        FOREACH_LWP_IN_PROC(lp, p) {
                /* XXX lwp might need a different metric */
                pri += lp->lwp_slptime;
        }
        pri += p->p_swtime - p->p_nice * 8;

        /*
         * The more pages paged out while we were swapped,
         * the more work we have to do to get up and running
         * again and the lower our wakeup priority.
         *
         * Each second of sleep time is worth ~1MB (with 4KB pages that
         * is 256 pages, so each 256-page resident deficit costs one
         * priority point).
         */
        if ((vm = p->p_vmspace) != NULL) {
                vmspace_hold(vm);
                pgs = vmspace_resident_count(vm);
                if (pgs < vm->vm_swrss) {
                        pri -= (vm->vm_swrss - pgs) /
                               (1024 * 1024 / PAGE_SIZE);
                }
                vmspace_drop(vm);
        }
        lwkt_reltoken(&p->p_token);

        /*
         * If this process is higher priority and there is
         * enough space, then select this process instead of
         * the previous selection.
         */
        if (pri > info->ppri) {
                if (info->pp)
                        PRELE(info->pp);
                PHOLD(p);
                info->pp = p;
                info->ppri = pri;
        }
        return(0);
}

/*
 * SMP races ok.
 * No requirements.
 */
void
swapin_request(void)
{
        if (scheduler_notify == 0) {
                scheduler_notify = 1;
                wakeup(&scheduler_notify);
        }
}

#ifndef NO_SWAPPING
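
/*
 * A process is eligible for swapout only when it is not held
 * (p_lock == 0) and is not being traced, not a system process,
 * not already swapped out, and not exiting.
 */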
#define	swappable(p) \
        (((p)->p_lock == 0) && \
         ((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
        CTLFLAG_RW, &swap_idle_threshold1, 0, "Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  Default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
        CTLFLAG_RW, &swap_idle_threshold2, 0, "Time (sec) a process can idle before being swapped");
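
/*
 * Both thresholds are read-write sysctls under the vm tree and can be
 * tuned at runtime, for example (illustrative value only):
 *
 *	sysctl vm.swap_idle_threshold2=120
 */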

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those proc's pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
static int swapout_procs_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
void
swapout_procs(int action)
{
        allproc_scan(swapout_procs_callback, &action, 0);
}

static int
swapout_procs_callback(struct proc *p, void *data)
{
        struct lwp *lp;
        int action = *(int *)data;
        int minslp = -1;

        if (!swappable(p))
                return(0);

        lwkt_gettoken(&p->p_token);

        /*
         * We only consider active processes.
         */
        if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
                lwkt_reltoken(&p->p_token);
                return(0);
        }

        FOREACH_LWP_IN_PROC(lp, p) {
                /*
                 * do not swap out a realtime process
                 */
                if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
                        lwkt_reltoken(&p->p_token);
                        return(0);
                }

                /*
                 * Guarantee swap_idle_threshold time in memory
                 */
                if (lp->lwp_slptime < swap_idle_threshold1) {
                        lwkt_reltoken(&p->p_token);
                        return(0);
                }

                /*
                 * If the system is under memory stress, or if we
                 * are swapping idle processes >= swap_idle_threshold2,
                 * then swap the process out.
                 */
                if (((action & VM_SWAP_NORMAL) == 0) &&
                    (((action & VM_SWAP_IDLE) == 0) ||
                     (lp->lwp_slptime < swap_idle_threshold2))) {
                        lwkt_reltoken(&p->p_token);
                        return(0);
                }

                if (minslp == -1 || lp->lwp_slptime < minslp)
                        minslp = lp->lwp_slptime;
        }

        /*
         * If the process has been asleep for awhile, swap
         * it out.
         */
        if ((action & VM_SWAP_NORMAL) ||
            ((action & VM_SWAP_IDLE) &&
             (minslp > swap_idle_threshold2))) {
                swapout(p);
        }

        /*
         * cleanup our reference
         */
        lwkt_reltoken(&p->p_token);

        return(0);
}

/*
 * The caller must hold p->p_token
 */
static void
swapout(struct proc *p)
{
#ifdef INVARIANTS
        if (swap_debug)
                kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);
#endif
        ++p->p_ru.ru_nswap;

        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
        p->p_flags |= P_SWAPPEDOUT;
        p->p_swtime = 0;
}

#endif /* !NO_SWAPPING */