sys/vm/vm_pageout.c (dragonfly.git)
1 /*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
36 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54 * Carnegie Mellon requests users of this software to return to
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
64 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
68 * The proverbial page-out daemon.
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
99 * System initialization
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_page(vm_page_t m, int *max_launderp,
104 int *vnodes_skippedp, struct vnode **vpfailedp,
105 int pass, int vmflush_flags);
106 static int vm_pageout_clean_helper (vm_page_t, int);
107 static int vm_pageout_free_page_calc (vm_size_t count);
108 static void vm_pageout_page_free(vm_page_t m) ;
109 struct thread *pagethread;
111 #if !defined(NO_SWAPPING)
112 /* the kernel process "vm_daemon"*/
113 static void vm_daemon (void);
114 static struct thread *vmthread;
116 static struct kproc_desc vm_kp = {
117 "vmdaemon",
118 vm_daemon,
119 &vmthread
121 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
122 #endif
124 int vm_pages_needed = 0; /* Event on which pageout daemon sleeps */
125 int vm_pageout_deficit = 0; /* Estimated number of pages deficit */
126 int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
127 int vm_page_free_hysteresis = 16;
129 #if !defined(NO_SWAPPING)
130 static int vm_pageout_req_swapout;
131 static int vm_daemon_needed;
132 #endif
133 static int vm_max_launder = 4096;
134 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
135 static int vm_pageout_full_stats_interval = 0;
136 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
137 static int defer_swap_pageouts=0;
138 static int disable_swap_pageouts=0;
139 static u_int vm_anonmem_decline = ACT_DECLINE;
140 static u_int vm_filemem_decline = ACT_DECLINE * 2;
142 #if defined(NO_SWAPPING)
143 static int vm_swap_enabled=0;
144 static int vm_swap_idle_enabled=0;
145 #else
146 static int vm_swap_enabled=1;
147 static int vm_swap_idle_enabled=0;
148 #endif
149 int vm_pageout_memuse_mode=1; /* 0-disable, 1-passive, 2-active swp*/
151 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
152 CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
154 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
155 CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
157 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
158 CTLFLAG_RW, &vm_page_free_hysteresis, 0,
159 "Free more pages than the minimum required");
161 SYSCTL_INT(_vm, OID_AUTO, max_launder,
162 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
164 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
165 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
167 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
168 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
170 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
171 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
173 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
174 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
175 SYSCTL_INT(_vm, OID_AUTO, pageout_memuse_mode,
176 CTLFLAG_RW, &vm_pageout_memuse_mode, 0, "memoryuse resource mode");
178 #if defined(NO_SWAPPING)
179 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
180 CTLFLAG_RD, &vm_swap_enabled, 0, "");
181 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
182 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
183 #else
184 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
185 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
186 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
187 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
188 #endif
190 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
191 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
193 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
194 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
196 static int pageout_lock_miss;
197 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
198 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
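/*
 * Example (illustrative only, the values are not defaults): the knobs
 * declared above can be inspected and tuned at runtime with sysctl(8),
 * e.g.
 *
 *	sysctl vm.max_launder=256
 *	sysctl vm.pageout_memuse_mode=2
 *	sysctl vm.swap_idle_enabled=1
 *
 * The variants compiled under NO_SWAPPING are read-only (CTLFLAG_RD).
 */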
200 int vm_page_max_wired; /* XXX max # of wired pages system-wide */
202 #if !defined(NO_SWAPPING)
203 static void vm_req_vmdaemon (void);
204 #endif
205 static void vm_pageout_page_stats(int q);
208 * Calculate approximately how many pages on each queue to try to
209 * clean. An exact calculation creates an edge condition when the
210 * queues are unbalanced so add significant slop. The queue scans
211 * will stop early when targets are reached and will start where they
212 * left off on the next pass.
214 * We need to be generous here because there are all sorts of loading
 215  * conditions that can cause edge cases if we try to average over all queues.
216 * In particular, storage subsystems have become so fast that paging
217 * activity can become quite frantic. Eventually we will probably need
218 * two paging threads, one for dirty pages and one for clean, to deal
219 * with the bandwidth requirements.
221 * So what we do is calculate a value that can be satisfied nominally by
222 * only having to scan half the queues.
224 static __inline int
225 PQAVERAGE(int n)
227 int avg;
229 if (n >= 0) {
230 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
231 } else {
232 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
234 return avg;
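/*
 * Illustration of the slop (PQ_L2_SIZE is configuration dependent; 256 is
 * assumed here only for the arithmetic): PQAVERAGE(1000) =
 * (1000 + 255) / 128 + 1 = 10, so each of the 256 per-color queues is
 * asked for up to 10 pages and roughly half of them (128 * 10 = 1280)
 * already cover the 1000-page target.
 */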
238 * vm_pageout_clean_helper:
240 * Clean the page and remove it from the laundry. The page must be busied
241 * by the caller and will be disposed of (put away, flushed) by this routine.
243 static int
244 vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
246 vm_object_t object;
247 vm_page_t mc[BLIST_MAX_ALLOC];
248 int error;
249 int ib, is, page_base;
250 vm_pindex_t pindex = m->pindex;
252 object = m->object;
255 * Don't mess with the page if it's held or special.
257 * XXX do we really need to check hold_count here? hold_count
258 * isn't supposed to mess with vm_page ops except prevent the
259 * page from being reused.
261 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
262 vm_page_wakeup(m);
263 return 0;
267 * Place page in cluster. Align cluster for optimal swap space
268 * allocation (whether it is swap or not). This is typically ~16-32
269 * pages, which also tends to align the cluster to multiples of the
270 * filesystem block size if backed by a filesystem.
272 page_base = pindex % BLIST_MAX_ALLOC;
273 mc[page_base] = m;
274 ib = page_base - 1;
275 is = page_base + 1;
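/*
 * Example of the alignment (a BLIST_MAX_ALLOC of 16 is assumed here only
 * for illustration): for pindex 37, page_base = 37 % 16 = 5, so the
 * backward scan (ib = 4..0) covers pindex 36..32 and the forward scan
 * (is = 6..15) covers pindex 38..47, i.e. the cluster is the 16-page
 * aligned window [32, 47] containing the original page.
 */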
278 * Scan object for clusterable pages.
280 * We can cluster ONLY if: ->> the page is NOT
281 * clean, wired, busy, held, or mapped into a
282 * buffer, and one of the following:
283 * 1) The page is inactive, or a seldom used
284 * active page.
285 * -or-
286 * 2) we force the issue.
288 * During heavy mmap/modification loads the pageout
289 * daemon can really fragment the underlying file
290 * due to flushing pages out of order and not trying
 291  * to align the clusters (which leaves sporadic out-of-order
292 * holes). To solve this problem we do the reverse scan
293 * first and attempt to align our cluster, then do a
294 * forward scan if room remains.
296 vm_object_hold(object);
298 while (ib >= 0) {
299 vm_page_t p;
301 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
302 TRUE, &error);
303 if (error || p == NULL)
304 break;
305 if ((p->queue - p->pc) == PQ_CACHE ||
306 (p->flags & PG_UNMANAGED)) {
307 vm_page_wakeup(p);
308 break;
310 vm_page_test_dirty(p);
311 if (((p->dirty & p->valid) == 0 &&
312 (p->flags & PG_NEED_COMMIT) == 0) ||
313 p->wire_count != 0 || /* may be held by buf cache */
314 p->hold_count != 0) { /* may be undergoing I/O */
315 vm_page_wakeup(p);
316 break;
318 if (p->queue - p->pc != PQ_INACTIVE) {
319 if (p->queue - p->pc != PQ_ACTIVE ||
320 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
321 vm_page_wakeup(p);
322 break;
327 * Try to maintain page groupings in the cluster.
329 if (m->flags & PG_WINATCFLS)
330 vm_page_flag_set(p, PG_WINATCFLS);
331 else
332 vm_page_flag_clear(p, PG_WINATCFLS);
333 p->act_count = m->act_count;
335 mc[ib] = p;
336 --ib;
338 ++ib; /* fixup */
340 while (is < BLIST_MAX_ALLOC &&
341 pindex - page_base + is < object->size) {
342 vm_page_t p;
344 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
345 TRUE, &error);
346 if (error || p == NULL)
347 break;
348 if (((p->queue - p->pc) == PQ_CACHE) ||
349 (p->flags & PG_UNMANAGED)) {
350 vm_page_wakeup(p);
351 break;
353 vm_page_test_dirty(p);
354 if (((p->dirty & p->valid) == 0 &&
355 (p->flags & PG_NEED_COMMIT) == 0) ||
356 p->wire_count != 0 || /* may be held by buf cache */
357 p->hold_count != 0) { /* may be undergoing I/O */
358 vm_page_wakeup(p);
359 break;
361 if (p->queue - p->pc != PQ_INACTIVE) {
362 if (p->queue - p->pc != PQ_ACTIVE ||
363 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
364 vm_page_wakeup(p);
365 break;
370 * Try to maintain page groupings in the cluster.
372 if (m->flags & PG_WINATCFLS)
373 vm_page_flag_set(p, PG_WINATCFLS);
374 else
375 vm_page_flag_clear(p, PG_WINATCFLS);
376 p->act_count = m->act_count;
378 mc[is] = p;
379 ++is;
382 vm_object_drop(object);
385 * we allow reads during pageouts...
387 return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
391 * vm_pageout_flush() - launder the given pages
393 * The given pages are laundered. Note that we setup for the start of
394 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
 395  * reference count all in here rather than in the parent. If we want
396 * the parent to do more sophisticated things we may have to change
397 * the ordering.
399 * The pages in the array must be busied by the caller and will be
400 * unbusied by this function.
403 vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
405 vm_object_t object;
406 int pageout_status[count];
407 int numpagedout = 0;
408 int i;
411 * Initiate I/O. Bump the vm_page_t->busy counter.
413 for (i = 0; i < count; i++) {
414 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
415 ("vm_pageout_flush page %p index %d/%d: partially "
416 "invalid page", mc[i], i, count));
417 vm_page_io_start(mc[i]);
421 * We must make the pages read-only. This will also force the
422 * modified bit in the related pmaps to be cleared. The pager
423 * cannot clear the bit for us since the I/O completion code
424 * typically runs from an interrupt. The act of making the page
425 * read-only handles the case for us.
427 * Then we can unbusy the pages, we still hold a reference by virtue
428 * of our soft-busy.
430 for (i = 0; i < count; i++) {
431 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
432 vm_page_protect(mc[i], VM_PROT_NONE);
433 else
434 vm_page_protect(mc[i], VM_PROT_READ);
435 vm_page_wakeup(mc[i]);
438 object = mc[0]->object;
439 vm_object_pip_add(object, count);
441 vm_pager_put_pages(object, mc, count,
442 (vmflush_flags |
443 ((object == &kernel_object) ?
444 VM_PAGER_PUT_SYNC : 0)),
445 pageout_status);
447 for (i = 0; i < count; i++) {
448 vm_page_t mt = mc[i];
450 switch (pageout_status[i]) {
451 case VM_PAGER_OK:
452 numpagedout++;
453 break;
454 case VM_PAGER_PEND:
455 numpagedout++;
456 break;
457 case VM_PAGER_BAD:
459 * Page outside of range of object. Right now we
460 * essentially lose the changes by pretending it
461 * worked.
463 vm_page_busy_wait(mt, FALSE, "pgbad");
464 pmap_clear_modify(mt);
465 vm_page_undirty(mt);
466 vm_page_wakeup(mt);
467 break;
468 case VM_PAGER_ERROR:
469 case VM_PAGER_FAIL:
471 * A page typically cannot be paged out when we
472 * have run out of swap. We leave the page
473 * marked inactive and will try to page it out
474 * again later.
476 * Starvation of the active page list is used to
477 * determine when the system is massively memory
478 * starved.
480 break;
481 case VM_PAGER_AGAIN:
482 break;
486 * If not PENDing this was a synchronous operation and we
487 * clean up after the I/O. If it is PENDing the mess is
488 * cleaned up asynchronously.
490 * Also nominally act on the caller's wishes if the caller
491 * wants to try to really clean (cache or free) the page.
493 * Also nominally deactivate the page if the system is
494 * memory-stressed.
496 if (pageout_status[i] != VM_PAGER_PEND) {
497 vm_page_busy_wait(mt, FALSE, "pgouw");
498 vm_page_io_finish(mt);
499 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
500 vm_page_try_to_cache(mt);
501 } else if (vm_page_count_severe()) {
502 vm_page_deactivate(mt);
503 vm_page_wakeup(mt);
504 } else {
505 vm_page_wakeup(mt);
507 vm_object_pip_wakeup(object);
510 return numpagedout;
513 #if !defined(NO_SWAPPING)
516 * Callback function, page busied for us. We must dispose of the busy
517 * condition. Any related pmap pages may be held but will not be locked.
519 static
521 vm_pageout_mdp_callback(struct pmap_pgscan_info *info, vm_offset_t va,
522 vm_page_t p)
524 int actcount;
525 int cleanit = 0;
528 * Basic tests - There should never be a marker, and we can stop
529 * once the RSS is below the required level.
531 KKASSERT((p->flags & PG_MARKER) == 0);
532 if (pmap_resident_tlnw_count(info->pmap) <= info->limit) {
533 vm_page_wakeup(p);
534 return(-1);
537 mycpu->gd_cnt.v_pdpages++;
539 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
540 vm_page_wakeup(p);
541 goto done;
544 ++info->actioncount;
 547  * Check if the page has been referenced recently. If it has,
548 * activate it and skip.
550 actcount = pmap_ts_referenced(p);
551 if (actcount) {
552 vm_page_flag_set(p, PG_REFERENCED);
553 } else if (p->flags & PG_REFERENCED) {
554 actcount = 1;
557 if (actcount) {
558 if (p->queue - p->pc != PQ_ACTIVE) {
559 vm_page_and_queue_spin_lock(p);
560 if (p->queue - p->pc != PQ_ACTIVE) {
561 vm_page_and_queue_spin_unlock(p);
562 vm_page_activate(p);
563 } else {
564 vm_page_and_queue_spin_unlock(p);
566 } else {
567 p->act_count += actcount;
568 if (p->act_count > ACT_MAX)
569 p->act_count = ACT_MAX;
571 vm_page_flag_clear(p, PG_REFERENCED);
572 vm_page_wakeup(p);
573 goto done;
577 * Remove the page from this particular pmap. Once we do this, our
578 * pmap scans will not see it again (unless it gets faulted in), so
579 * we must actively dispose of or deal with the page.
581 pmap_remove_specific(info->pmap, p);
584 * If the page is not mapped to another process (i.e. as would be
585 * typical if this were a shared page from a library) then deactivate
586 * the page and clean it in two passes only.
588 * If the page hasn't been referenced since the last check, remove it
589 * from the pmap. If it is no longer mapped, deactivate it
590 * immediately, accelerating the normal decline.
592 * Once the page has been removed from the pmap the RSS code no
593 * longer tracks it so we have to make sure that it is staged for
594 * potential flush action.
596 if ((p->flags & PG_MAPPED) == 0) {
597 if (p->queue - p->pc == PQ_ACTIVE) {
598 vm_page_deactivate(p);
600 if (p->queue - p->pc == PQ_INACTIVE) {
601 cleanit = 1;
606 * Ok, try to fully clean the page and any nearby pages such that at
607 * least the requested page is freed or moved to the cache queue.
609 * We usually do this synchronously to allow us to get the page into
610 * the CACHE queue quickly, which will prevent memory exhaustion if
611 * a process with a memoryuse limit is running away. However, the
612 * sysadmin may desire to set vm.swap_user_async which relaxes this
613 * and improves write performance.
615 if (cleanit) {
616 int max_launder = 0x7FFF;
617 int vnodes_skipped = 0;
618 int vmflush_flags;
619 struct vnode *vpfailed = NULL;
621 info->offset = va;
623 if (vm_pageout_memuse_mode >= 2) {
624 vmflush_flags = VM_PAGER_TRY_TO_CACHE |
625 VM_PAGER_ALLOW_ACTIVE;
626 if (swap_user_async == 0)
627 vmflush_flags |= VM_PAGER_PUT_SYNC;
628 vm_page_flag_set(p, PG_WINATCFLS);
629 info->cleancount +=
630 vm_pageout_page(p, &max_launder,
631 &vnodes_skipped,
632 &vpfailed, 1, vmflush_flags);
633 } else {
634 vm_page_wakeup(p);
635 ++info->cleancount;
637 } else {
638 vm_page_wakeup(p);
642 * Must be at end to avoid SMP races.
644 done:
645 lwkt_user_yield();
646 return 0;
650 * Deactivate some number of pages in a map due to set RLIMIT_RSS limits.
 651  * This is relatively difficult to do, so we try to keep track of where we
652 * left off last time to reduce scan overhead.
654 * Called when vm_pageout_memuse_mode is >= 1.
656 void
657 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
659 vm_offset_t pgout_offset;
660 struct pmap_pgscan_info info;
661 int retries = 3;
663 pgout_offset = map->pgout_offset;
664 again:
665 #if 0
666 kprintf("%016jx ", pgout_offset);
667 #endif
668 if (pgout_offset < VM_MIN_USER_ADDRESS)
669 pgout_offset = VM_MIN_USER_ADDRESS;
670 if (pgout_offset >= VM_MAX_USER_ADDRESS)
671 pgout_offset = 0;
672 info.pmap = vm_map_pmap(map);
673 info.limit = limit;
674 info.beg_addr = pgout_offset;
675 info.end_addr = VM_MAX_USER_ADDRESS;
676 info.callback = vm_pageout_mdp_callback;
677 info.cleancount = 0;
678 info.actioncount = 0;
679 info.busycount = 0;
681 pmap_pgscan(&info);
682 pgout_offset = info.offset;
683 #if 0
684 kprintf("%016jx %08lx %08lx\n", pgout_offset,
685 info.cleancount, info.actioncount);
686 #endif
688 if (pgout_offset != VM_MAX_USER_ADDRESS &&
689 pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
690 goto again;
691 } else if (retries &&
692 pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
693 --retries;
694 goto again;
696 map->pgout_offset = pgout_offset;
698 #endif
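/*
 * Illustrative userland trigger for the RSS enforcement path above
 * (values are arbitrary): a process that does
 *
 *	struct rlimit rl = { 64 * 1024 * 1024, 64 * 1024 * 1024 };
 *	setrlimit(RLIMIT_RSS, &rl);
 *
 * and then grows past 64MB resident will, with vm.pageout_memuse_mode
 * set to 1 or 2, have vm_pageout_map_deactivate_pages() run against its
 * map until pmap_resident_tlnw_count() drops back under the limit;
 * mode 2 additionally pushes the deactivated pages toward swap.
 */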
701 * Called when the pageout scan wants to free a page. We no longer
702 * try to cycle the vm_object here with a reference & dealloc, which can
703 * cause a non-trivial object collapse in a critical path.
705 * It is unclear why we cycled the ref_count in the past, perhaps to try
706 * to optimize shadow chain collapses but I don't quite see why it would
707 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
 708  * synchronously and not have to be kick-started.
710 static void
711 vm_pageout_page_free(vm_page_t m)
713 vm_page_protect(m, VM_PROT_NONE);
714 vm_page_free(m);
718 * vm_pageout_scan does the dirty work for the pageout daemon.
720 struct vm_pageout_scan_info {
721 struct proc *bigproc;
722 vm_offset_t bigsize;
725 static int vm_pageout_scan_callback(struct proc *p, void *data);
727 static int
728 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
729 int *vnodes_skipped)
731 vm_page_t m;
732 struct vm_page marker;
733 struct vnode *vpfailed; /* warning, allowed to be stale */
734 int maxscan;
735 int delta = 0;
736 int max_launder;
739 * Start scanning the inactive queue for pages we can move to the
740 * cache or free. The scan will stop when the target is reached or
741 * we have scanned the entire inactive queue. Note that m->act_count
742 * is not used to form decisions for the inactive queue, only for the
743 * active queue.
745 * max_launder limits the number of dirty pages we flush per scan.
746 * For most systems a smaller value (16 or 32) is more robust under
747 * extreme memory and disk pressure because any unnecessary writes
 748  * to disk can result in extreme performance degredation. However,
 748  * to disk can result in extreme performance degradation. However,
749 * systems with excessive dirty pages (especially when MAP_NOSYNC is
750 * used) will die horribly with limited laundering. If the pageout
751 * daemon cannot clean enough pages in the first pass, we let it go
752 * all out in succeeding passes.
754 if ((max_launder = vm_max_launder) <= 1)
755 max_launder = 1;
756 if (pass)
757 max_launder = 10000;
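/*
 * Example (hypothetical tuning): with vm.max_launder set to 32, a
 * first-pass scan of this queue initiates at most 32 dirty-page
 * flushes; if that pass fails to reach its target, subsequent passes
 * raise the cap to 10000 as above and flush as needed.
 */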
760 * Initialize our marker
762 bzero(&marker, sizeof(marker));
763 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
764 marker.queue = PQ_INACTIVE + q;
765 marker.pc = q;
766 marker.wire_count = 1;
769 * Inactive queue scan.
771 * NOTE: The vm_page must be spinlocked before the queue to avoid
772 * deadlocks, so it is easiest to simply iterate the loop
773 * with the queue unlocked at the top.
775 vpfailed = NULL;
777 vm_page_queues_spin_lock(PQ_INACTIVE + q);
778 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
779 maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
782 * Queue locked at top of loop to avoid stack marker issues.
784 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
785 maxscan-- > 0 && avail_shortage - delta > 0)
787 int count;
789 KKASSERT(m->queue == PQ_INACTIVE + q);
790 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
791 &marker, pageq);
792 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
793 &marker, pageq);
794 mycpu->gd_cnt.v_pdpages++;
797 * Skip marker pages (atomic against other markers to avoid
798 * infinite hop-over scans).
800 if (m->flags & PG_MARKER)
801 continue;
804 * Try to busy the page. Don't mess with pages which are
805 * already busy or reorder them in the queue.
807 if (vm_page_busy_try(m, TRUE))
808 continue;
811 * Remaining operations run with the page busy and neither
812 * the page or the queue will be spin-locked.
814 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
815 KKASSERT(m->queue == PQ_INACTIVE + q);
817 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
818 &vpfailed, pass, 0);
819 delta += count;
822 * Systems with a ton of memory can wind up with huge
823 * deactivation counts. Because the inactive scan is
824 * doing a lot of flushing, the combination can result
825 * in excessive paging even in situations where other
826 * unrelated threads free up sufficient VM.
828 * To deal with this we abort the nominal active->inactive
829 * scan before we hit the inactive target when free+cache
830 * levels have reached a reasonable target.
832 * When deciding to stop early we need to add some slop to
833 * the test and we need to return full completion to the caller
834 * to prevent the caller from thinking there is something
835 * wrong and issuing a low-memory+swap warning or pkill.
837 * A deficit forces paging regardless of the state of the
838 * VM page queues (used for RSS enforcement).
840 lwkt_yield();
841 vm_page_queues_spin_lock(PQ_INACTIVE + q);
842 if (vm_paging_target() < -vm_max_launder) {
844 * Stopping early, return full completion to caller.
846 if (delta < avail_shortage)
847 delta = avail_shortage;
848 break;
852 /* page queue still spin-locked */
853 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
854 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
856 return (delta);
860 * Pageout the specified page, return the total number of pages paged out
861 * (this routine may cluster).
863 * The page must be busied and soft-busied by the caller and will be disposed
864 * of by this function.
866 static int
867 vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
868 struct vnode **vpfailedp, int pass, int vmflush_flags)
870 vm_object_t object;
871 int actcount;
872 int count = 0;
875 * It is possible for a page to be busied ad-hoc (e.g. the
876 * pmap_collect() code) and wired and race against the
877 * allocation of a new page. vm_page_alloc() may be forced
878 * to deactivate the wired page in which case it winds up
879 * on the inactive queue and must be handled here. We
880 * correct the problem simply by unqueuing the page.
882 if (m->wire_count) {
883 vm_page_unqueue_nowakeup(m);
884 vm_page_wakeup(m);
885 kprintf("WARNING: pagedaemon: wired page on "
886 "inactive queue %p\n", m);
887 return 0;
891 * A held page may be undergoing I/O, so skip it.
893 if (m->hold_count) {
894 vm_page_and_queue_spin_lock(m);
895 if (m->queue - m->pc == PQ_INACTIVE) {
896 TAILQ_REMOVE(
897 &vm_page_queues[m->queue].pl, m, pageq);
898 TAILQ_INSERT_TAIL(
899 &vm_page_queues[m->queue].pl, m, pageq);
900 ++vm_swapcache_inactive_heuristic;
902 vm_page_and_queue_spin_unlock(m);
903 vm_page_wakeup(m);
904 return 0;
907 if (m->object == NULL || m->object->ref_count == 0) {
909 * If the object is not being used, we ignore previous
910 * references.
912 vm_page_flag_clear(m, PG_REFERENCED);
913 pmap_clear_reference(m);
914 /* fall through to end */
915 } else if (((m->flags & PG_REFERENCED) == 0) &&
916 (actcount = pmap_ts_referenced(m))) {
918 * Otherwise, if the page has been referenced while
919 * in the inactive queue, we bump the "activation
920 * count" upwards, making it less likely that the
921 * page will be added back to the inactive queue
922 * prematurely again. Here we check the page tables
923 * (or emulated bits, if any), given the upper level
924 * VM system not knowing anything about existing
925 * references.
927 vm_page_activate(m);
928 m->act_count += (actcount + ACT_ADVANCE);
929 vm_page_wakeup(m);
930 return 0;
934 * (m) is still busied.
936 * If the upper level VM system knows about any page
937 * references, we activate the page. We also set the
938 * "activation count" higher than normal so that we will less
939 * likely place pages back onto the inactive queue again.
941 if ((m->flags & PG_REFERENCED) != 0) {
942 vm_page_flag_clear(m, PG_REFERENCED);
943 actcount = pmap_ts_referenced(m);
944 vm_page_activate(m);
945 m->act_count += (actcount + ACT_ADVANCE + 1);
946 vm_page_wakeup(m);
947 return 0;
951 * If the upper level VM system doesn't know anything about
952 * the page being dirty, we have to check for it again. As
953 * far as the VM code knows, any partially dirty pages are
954 * fully dirty.
956 * Pages marked PG_WRITEABLE may be mapped into the user
957 * address space of a process running on another cpu. A
958 * user process (without holding the MP lock) running on
959 * another cpu may be able to touch the page while we are
960 * trying to remove it. vm_page_cache() will handle this
961 * case for us.
963 if (m->dirty == 0) {
964 vm_page_test_dirty(m);
965 } else {
966 vm_page_dirty(m);
969 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
971 * Invalid pages can be easily freed
973 vm_pageout_page_free(m);
974 mycpu->gd_cnt.v_dfree++;
975 ++count;
976 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
978 * Clean pages can be placed onto the cache queue.
979 * This effectively frees them.
981 vm_page_cache(m);
982 ++count;
983 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
985 * Dirty pages need to be paged out, but flushing
 986  * a page is extremely expensive versus freeing
 987  * a clean page. Rather than artificially limiting
988 * the number of pages we can flush, we instead give
989 * dirty pages extra priority on the inactive queue
990 * by forcing them to be cycled through the queue
991 * twice before being flushed, after which the
992 * (now clean) page will cycle through once more
993 * before being freed. This significantly extends
994 * the thrash point for a heavily loaded machine.
996 vm_page_flag_set(m, PG_WINATCFLS);
997 vm_page_and_queue_spin_lock(m);
998 if (m->queue - m->pc == PQ_INACTIVE) {
999 TAILQ_REMOVE(
1000 &vm_page_queues[m->queue].pl, m, pageq);
1001 TAILQ_INSERT_TAIL(
1002 &vm_page_queues[m->queue].pl, m, pageq);
1003 ++vm_swapcache_inactive_heuristic;
1005 vm_page_and_queue_spin_unlock(m);
1006 vm_page_wakeup(m);
1007 } else if (*max_launderp > 0) {
1009 * We always want to try to flush some dirty pages if
1010 * we encounter them, to keep the system stable.
1011 * Normally this number is small, but under extreme
1012 * pressure where there are insufficient clean pages
1013 * on the inactive queue, we may have to go all out.
1015 int swap_pageouts_ok;
1016 struct vnode *vp = NULL;
1018 swap_pageouts_ok = 0;
1019 object = m->object;
1020 if (object &&
1021 (object->type != OBJT_SWAP) &&
1022 (object->type != OBJT_DEFAULT)) {
1023 swap_pageouts_ok = 1;
1024 } else {
1025 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1026 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
1027 vm_page_count_min(0));
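/*
 * In other words: vnode/device backed objects can always be laundered
 * here, while anonymous (swap/default backed) objects are laundered
 * only when swap pageouts are neither deferred nor disabled, or when
 * they are merely deferred but the free page count has already fallen
 * to the minimum.
 */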
1031 * We don't bother paging objects that are "dead".
1032 * Those objects are in a "rundown" state.
1034 if (!swap_pageouts_ok ||
1035 (object == NULL) ||
1036 (object->flags & OBJ_DEAD)) {
1037 vm_page_and_queue_spin_lock(m);
1038 if (m->queue - m->pc == PQ_INACTIVE) {
1039 TAILQ_REMOVE(
1040 &vm_page_queues[m->queue].pl,
1041 m, pageq);
1042 TAILQ_INSERT_TAIL(
1043 &vm_page_queues[m->queue].pl,
1044 m, pageq);
1045 ++vm_swapcache_inactive_heuristic;
1047 vm_page_and_queue_spin_unlock(m);
1048 vm_page_wakeup(m);
1049 return 0;
1053 * (m) is still busied.
1055 * The object is already known NOT to be dead. It
1056 * is possible for the vget() to block the whole
1057 * pageout daemon, but the new low-memory handling
1058 * code should prevent it.
1060 * The previous code skipped locked vnodes and, worse,
1061 * reordered pages in the queue. This results in
1062 * completely non-deterministic operation because,
1063 * quite often, a vm_fault has initiated an I/O and
1064 * is holding a locked vnode at just the point where
1065 * the pageout daemon is woken up.
1067 * We can't wait forever for the vnode lock, we might
1068 * deadlock due to a vn_read() getting stuck in
1069 * vm_wait while holding this vnode. We skip the
1070 * vnode if we can't get it in a reasonable amount
1071 * of time.
1073 * vpfailed is used to (try to) avoid the case where
1074 * a large number of pages are associated with a
1075 * locked vnode, which could cause the pageout daemon
1076 * to stall for an excessive amount of time.
1078 if (object->type == OBJT_VNODE) {
1079 int flags;
1081 vp = object->handle;
1082 flags = LK_EXCLUSIVE;
1083 if (vp == *vpfailedp)
1084 flags |= LK_NOWAIT;
1085 else
1086 flags |= LK_TIMELOCK;
1087 vm_page_hold(m);
1088 vm_page_wakeup(m);
1091 * We have unbusied (m) temporarily so we can
1092 * acquire the vp lock without deadlocking.
1093 * (m) is held to prevent destruction.
1095 if (vget(vp, flags) != 0) {
1096 *vpfailedp = vp;
1097 ++pageout_lock_miss;
1098 if (object->flags & OBJ_MIGHTBEDIRTY)
1099 ++*vnodes_skippedp;
1100 vm_page_unhold(m);
1101 return 0;
1105 * The page might have been moved to another
1106 * queue during potential blocking in vget()
1107 * above. The page might have been freed and
1108 * reused for another vnode. The object might
1109 * have been reused for another vnode.
1111 if (m->queue - m->pc != PQ_INACTIVE ||
1112 m->object != object ||
1113 object->handle != vp) {
1114 if (object->flags & OBJ_MIGHTBEDIRTY)
1115 ++*vnodes_skippedp;
1116 vput(vp);
1117 vm_page_unhold(m);
1118 return 0;
1122 * The page may have been busied during the
1123 * blocking in vput(); We don't move the
1124 * page back onto the end of the queue so that
1125 * statistics are more correct if we don't.
1127 if (vm_page_busy_try(m, TRUE)) {
1128 vput(vp);
1129 vm_page_unhold(m);
1130 return 0;
1132 vm_page_unhold(m);
1135 * (m) is busied again
1137 * We own the busy bit and remove our hold
1138 * bit. If the page is still held it
1139 * might be undergoing I/O, so skip it.
1141 if (m->hold_count) {
1142 vm_page_and_queue_spin_lock(m);
1143 if (m->queue - m->pc == PQ_INACTIVE) {
1144 TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1145 TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1146 ++vm_swapcache_inactive_heuristic;
1148 vm_page_and_queue_spin_unlock(m);
1149 if (object->flags & OBJ_MIGHTBEDIRTY)
1150 ++*vnodes_skippedp;
1151 vm_page_wakeup(m);
1152 vput(vp);
1153 return 0;
1155 /* (m) is left busied as we fall through */
1159 * page is busy and not held here.
1161 * If a page is dirty, then it is either being washed
1162 * (but not yet cleaned) or it is still in the
1163 * laundry. If it is still in the laundry, then we
1164 * start the cleaning operation.
1166 * decrement inactive_shortage on success to account
1167 * for the (future) cleaned page. Otherwise we
1168 * could wind up laundering or cleaning too many
1169 * pages.
1171 * NOTE: Cleaning the page here does not cause
1172 * force_deficit to be adjusted, because the
1173 * page is not being freed or moved to the
1174 * cache.
1176 count = vm_pageout_clean_helper(m, vmflush_flags);
1177 *max_launderp -= count;
1180 * Clean ate busy, page no longer accessible
1182 if (vp != NULL)
1183 vput(vp);
1184 } else {
1185 vm_page_wakeup(m);
1187 return count;
1190 static int
1191 vm_pageout_scan_active(int pass, int q,
1192 int avail_shortage, int inactive_shortage,
1193 int *recycle_countp)
1195 struct vm_page marker;
1196 vm_page_t m;
1197 int actcount;
1198 int delta = 0;
1199 int maxscan;
1202 * We want to move pages from the active queue to the inactive
1203 * queue to get the inactive queue to the inactive target. If
1204 * we still have a page shortage from above we try to directly free
1205 * clean pages instead of moving them.
1207 * If we do still have a shortage we keep track of the number of
1208 * pages we free or cache (recycle_count) as a measure of thrashing
1209 * between the active and inactive queues.
1211 * If we were able to completely satisfy the free+cache targets
1212 * from the inactive pool we limit the number of pages we move
1213 * from the active pool to the inactive pool to 2x the pages we
1214 * had removed from the inactive pool (with a minimum of 1/5 the
1215 * inactive target). If we were not able to completely satisfy
1216 * the free+cache targets we go for the whole target aggressively.
1218 * NOTE: Both variables can end up negative.
1219 * NOTE: We are still in a critical section.
1222 bzero(&marker, sizeof(marker));
1223 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1224 marker.queue = PQ_ACTIVE + q;
1225 marker.pc = q;
1226 marker.wire_count = 1;
1228 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1229 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1230 maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1233 * Queue locked at top of loop to avoid stack marker issues.
1235 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1236 maxscan-- > 0 && (avail_shortage - delta > 0 ||
1237 inactive_shortage > 0))
1239 KKASSERT(m->queue == PQ_ACTIVE + q);
1240 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1241 &marker, pageq);
1242 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1243 &marker, pageq);
1246 * Skip marker pages (atomic against other markers to avoid
1247 * infinite hop-over scans).
1249 if (m->flags & PG_MARKER)
1250 continue;
1253 * Try to busy the page. Don't mess with pages which are
1254 * already busy or reorder them in the queue.
1256 if (vm_page_busy_try(m, TRUE))
1257 continue;
1260 * Remaining operations run with the page busy and neither
1261 * the page or the queue will be spin-locked.
1263 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1264 KKASSERT(m->queue == PQ_ACTIVE + q);
1267 * Don't deactivate pages that are held, even if we can
1268 * busy them. (XXX why not?)
1270 if (m->hold_count != 0) {
1271 vm_page_and_queue_spin_lock(m);
1272 if (m->queue - m->pc == PQ_ACTIVE) {
1273 TAILQ_REMOVE(
1274 &vm_page_queues[PQ_ACTIVE + q].pl,
1275 m, pageq);
1276 TAILQ_INSERT_TAIL(
1277 &vm_page_queues[PQ_ACTIVE + q].pl,
1278 m, pageq);
1280 vm_page_and_queue_spin_unlock(m);
1281 vm_page_wakeup(m);
1282 goto next;
1286 * The count for pagedaemon pages is done after checking the
1287 * page for eligibility...
1289 mycpu->gd_cnt.v_pdpages++;
1292 * Check to see "how much" the page has been used and clear
1293 * the tracking access bits. If the object has no references
1294 * don't bother paying the expense.
1296 actcount = 0;
1297 if (m->object && m->object->ref_count != 0) {
1298 if (m->flags & PG_REFERENCED)
1299 ++actcount;
1300 actcount += pmap_ts_referenced(m);
1301 if (actcount) {
1302 m->act_count += ACT_ADVANCE + actcount;
1303 if (m->act_count > ACT_MAX)
1304 m->act_count = ACT_MAX;
1307 vm_page_flag_clear(m, PG_REFERENCED);
1310 * actcount is only valid if the object ref_count is non-zero.
1311 * If the page does not have an object, actcount will be zero.
1313 if (actcount && m->object->ref_count != 0) {
1314 vm_page_and_queue_spin_lock(m);
1315 if (m->queue - m->pc == PQ_ACTIVE) {
1316 TAILQ_REMOVE(
1317 &vm_page_queues[PQ_ACTIVE + q].pl,
1318 m, pageq);
1319 TAILQ_INSERT_TAIL(
1320 &vm_page_queues[PQ_ACTIVE + q].pl,
1321 m, pageq);
1323 vm_page_and_queue_spin_unlock(m);
1324 vm_page_wakeup(m);
1325 } else {
1326 switch(m->object->type) {
1327 case OBJT_DEFAULT:
1328 case OBJT_SWAP:
1329 m->act_count -= min(m->act_count,
1330 vm_anonmem_decline);
1331 break;
1332 default:
1333 m->act_count -= min(m->act_count,
1334 vm_filemem_decline);
1335 break;
1337 if (vm_pageout_algorithm ||
1338 (m->object == NULL) ||
1339 (m->object && (m->object->ref_count == 0)) ||
1340 m->act_count < pass + 1
1343 * Deactivate the page. If we had a
1344 * shortage from our inactive scan try to
1345 * free (cache) the page instead.
1347 * Don't just blindly cache the page if
1348 * we do not have a shortage from the
1349 * inactive scan, that could lead to
1350 * gigabytes being moved.
1352 --inactive_shortage;
1353 if (avail_shortage - delta > 0 ||
1354 (m->object && (m->object->ref_count == 0)))
1356 if (avail_shortage - delta > 0)
1357 ++*recycle_countp;
1358 vm_page_protect(m, VM_PROT_NONE);
1359 if (m->dirty == 0 &&
1360 (m->flags & PG_NEED_COMMIT) == 0 &&
1361 avail_shortage - delta > 0) {
1362 vm_page_cache(m);
1363 } else {
1364 vm_page_deactivate(m);
1365 vm_page_wakeup(m);
1367 } else {
1368 vm_page_deactivate(m);
1369 vm_page_wakeup(m);
1371 ++delta;
1372 } else {
1373 vm_page_and_queue_spin_lock(m);
1374 if (m->queue - m->pc == PQ_ACTIVE) {
1375 TAILQ_REMOVE(
1376 &vm_page_queues[PQ_ACTIVE + q].pl,
1377 m, pageq);
1378 TAILQ_INSERT_TAIL(
1379 &vm_page_queues[PQ_ACTIVE + q].pl,
1380 m, pageq);
1382 vm_page_and_queue_spin_unlock(m);
1383 vm_page_wakeup(m);
1386 next:
1387 lwkt_yield();
1388 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1392 * Clean out our local marker.
1394 * Page queue still spin-locked.
1396 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1397 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1399 return (delta);
1403 * The number of actually free pages can drop down to v_free_reserved,
1404 * we try to build the free count back above v_free_min. Note that
1405 * vm_paging_needed() also returns TRUE if v_free_count is not at
1406 * least v_free_min so that is the minimum we must build the free
1407 * count to.
1409 * We use a slightly higher target to improve hysteresis,
1410 * ((v_free_target + v_free_min) / 2). Since v_free_target
1411 * is usually the same as v_cache_min this maintains about
1412 * half the pages in the free queue as are in the cache queue,
1413 * providing pretty good pipelining for pageout operation.
1415 * The system operator can manipulate vm.v_cache_min and
1416  * vm.v_free_target to tune the pageout daemon. Be sure
1417 * to keep vm.v_free_min < vm.v_free_target.
1419 * Note that the original paging target is to get at least
1420 * (free_min + cache_min) into (free + cache). The slightly
1421 * higher target will shift additional pages from cache to free
1422  * without affecting the original paging target in order to
1423 * maintain better hysteresis and not have the free count always
1424 * be dead-on v_free_min.
1426 * NOTE: we are still in a critical section.
1428 * Pages moved from PQ_CACHE to totally free are not counted in the
1429 * pages_freed counter.
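/*
 * Example of the hysteresis midpoint (numbers are illustrative, not
 * defaults): with v_free_min = 2048 and v_free_target = 8192 the loop
 * below keeps moving PQ_CACHE pages to the free list until
 * v_free_count reaches (2048 + 8192) / 2 = 5120 pages.
 */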
1431 static void
1432 vm_pageout_scan_cache(int avail_shortage, int pass,
1433 int vnodes_skipped, int recycle_count)
1435 static int lastkillticks;
1436 struct vm_pageout_scan_info info;
1437 vm_page_t m;
1439 while (vmstats.v_free_count <
1440 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1442 * This steals some code from vm/vm_page.c
1444 static int cache_rover = 0;
1446 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK);
1447 if (m == NULL)
1448 break;
1449 /* page is returned removed from its queue and spinlocked */
1450 if (vm_page_busy_try(m, TRUE)) {
1451 vm_page_deactivate_locked(m);
1452 vm_page_spin_unlock(m);
1453 continue;
1455 vm_page_spin_unlock(m);
1456 pagedaemon_wakeup();
1457 lwkt_yield();
1460 * Remaining operations run with the page busy and neither
1461 * the page or the queue will be spin-locked.
1463 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1464 m->hold_count ||
1465 m->wire_count) {
1466 vm_page_deactivate(m);
1467 vm_page_wakeup(m);
1468 continue;
1470 KKASSERT((m->flags & PG_MAPPED) == 0);
1471 KKASSERT(m->dirty == 0);
1472 cache_rover += PQ_PRIME2;
1473 vm_pageout_page_free(m);
1474 mycpu->gd_cnt.v_dfree++;
1477 #if !defined(NO_SWAPPING)
1479 * Idle process swapout -- run once per second.
1481 if (vm_swap_idle_enabled) {
1482 static time_t lsec;
1483 if (time_uptime != lsec) {
1484 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
1485 vm_req_vmdaemon();
1486 lsec = time_uptime;
1489 #endif
1492 * If we didn't get enough free pages, and we have skipped a vnode
1493 * in a writeable object, wakeup the sync daemon. And kick swapout
1494 * if we did not get enough free pages.
1496 if (vm_paging_target() > 0) {
1497 if (vnodes_skipped && vm_page_count_min(0))
1498 speedup_syncer(NULL);
1499 #if !defined(NO_SWAPPING)
1500 if (vm_swap_enabled && vm_page_count_target()) {
1501 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
1502 vm_req_vmdaemon();
1504 #endif
1508 * Handle catastrophic conditions. Under good conditions we should
1509 * be at the target, well beyond our minimum. If we could not even
1510 * reach our minimum the system is under heavy stress. But just being
1511 * under heavy stress does not trigger process killing.
1513 * We consider ourselves to have run out of memory if the swap pager
1514 * is full and avail_shortage is still positive. The secondary check
1515  * ensures that we do not kill processes if the instantaneous
1516  * availability is good, even if the pageout daemon pass says it
1517 * couldn't get to the target.
1519 if (swap_pager_almost_full &&
1520 pass > 0 &&
1521 (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1522 kprintf("Warning: system low on memory+swap "
1523 "shortage %d for %d ticks!\n",
1524 avail_shortage, ticks - swap_fail_ticks);
1525 if (bootverbose)
1526 kprintf("Metrics: spaf=%d spf=%d pass=%d avail=%d target=%d last=%u\n",
1527 swap_pager_almost_full,
1528 swap_pager_full,
1529 pass,
1530 avail_shortage,
1531 vm_paging_target(),
1532 (unsigned int)(ticks - lastkillticks));
1534 if (swap_pager_full &&
1535 pass > 1 &&
1536 avail_shortage > 0 &&
1537 vm_paging_target() > 0 &&
1538 (unsigned int)(ticks - lastkillticks) >= hz) {
1540 * Kill something, maximum rate once per second to give
1541 * the process time to free up sufficient memory.
1543 lastkillticks = ticks;
1544 info.bigproc = NULL;
1545 info.bigsize = 0;
1546 allproc_scan(vm_pageout_scan_callback, &info);
1547 if (info.bigproc != NULL) {
1548 kprintf("Try to kill process %d %s\n",
1549 info.bigproc->p_pid, info.bigproc->p_comm);
1550 info.bigproc->p_nice = PRIO_MIN;
1551 info.bigproc->p_usched->resetpriority(
1552 FIRST_LWP_IN_PROC(info.bigproc));
1553 atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
1554 killproc(info.bigproc, "out of swap space");
1555 wakeup(&vmstats.v_free_count);
1556 PRELE(info.bigproc);
1561 static int
1562 vm_pageout_scan_callback(struct proc *p, void *data)
1564 struct vm_pageout_scan_info *info = data;
1565 vm_offset_t size;
1568 * Never kill system processes or init. If we have configured swap
1569 * then try to avoid killing low-numbered pids.
1571 if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1572 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1573 return (0);
1576 lwkt_gettoken(&p->p_token);
1579 * if the process is in a non-running type state,
1580 * don't touch it.
1582 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1583 lwkt_reltoken(&p->p_token);
1584 return (0);
1588 * Get the approximate process size. Note that anonymous pages
1589 * with backing swap will be counted twice, but there should not
1590 * be too many such pages due to the stress the VM system is
1591 * under at this point.
1593 size = vmspace_anonymous_count(p->p_vmspace) +
1594 vmspace_swap_count(p->p_vmspace);
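/*
 * Example of the double counting mentioned above: a process with
 * 300MB of anonymous memory, 100MB of which also has backing swap
 * assigned, is scored at roughly 400MB here; that is accepted as
 * close enough for picking the largest process.
 */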
1597  * If this process is bigger than the biggest one
1598 * remember it.
1600 if (info->bigsize < size) {
1601 if (info->bigproc)
1602 PRELE(info->bigproc);
1603 PHOLD(p);
1604 info->bigproc = p;
1605 info->bigsize = size;
1607 lwkt_reltoken(&p->p_token);
1608 lwkt_yield();
1610 return(0);
1614 * This routine tries to maintain the pseudo LRU active queue,
1615  * so that during long periods of time when there is no paging,
1616 * that some statistic accumulation still occurs. This code
1617 * helps the situation where paging just starts to occur.
1619 static void
1620 vm_pageout_page_stats(int q)
1622 static int fullintervalcount = 0;
1623 struct vm_page marker;
1624 vm_page_t m;
1625 int pcount, tpcount; /* Number of pages to check */
1626 int page_shortage;
1628 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1629 vmstats.v_free_min) -
1630 (vmstats.v_free_count + vmstats.v_inactive_count +
1631 vmstats.v_cache_count);
1633 if (page_shortage <= 0)
1634 return;
1636 pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1637 fullintervalcount += vm_pageout_stats_interval;
1638 if (fullintervalcount < vm_pageout_full_stats_interval) {
1639 tpcount = (vm_pageout_stats_max * pcount) /
1640 vmstats.v_page_count + 1;
1641 if (pcount > tpcount)
1642 pcount = tpcount;
1643 } else {
1644 fullintervalcount = 0;
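/*
 * Example of the partial-scan scaling above (illustrative numbers):
 * with vm_pageout_stats_max = 8192, an active queue of pcount = 10000
 * pages and v_page_count = 1000000, tpcount = 8192 * 10000 / 1000000
 * + 1 = 82, so only 82 pages are visited on this partial pass; a
 * full-length scan happens once per vm_pageout_full_stats_interval.
 */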
1647 bzero(&marker, sizeof(marker));
1648 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1649 marker.queue = PQ_ACTIVE + q;
1650 marker.pc = q;
1651 marker.wire_count = 1;
1653 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1654 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1657 * Queue locked at top of loop to avoid stack marker issues.
1659 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1660 pcount-- > 0)
1662 int actcount;
1664 KKASSERT(m->queue == PQ_ACTIVE + q);
1665 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1666 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1667 &marker, pageq);
1670 * Skip marker pages (atomic against other markers to avoid
1671 * infinite hop-over scans).
1673 if (m->flags & PG_MARKER)
1674 continue;
1677 * Ignore pages we can't busy
1679 if (vm_page_busy_try(m, TRUE))
1680 continue;
1683 * Remaining operations run with the page busy and neither
1684 * the page or the queue will be spin-locked.
1686 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1687 KKASSERT(m->queue == PQ_ACTIVE + q);
1690 * We now have a safely busied page, the page and queue
1691 * spinlocks have been released.
1693 * Ignore held pages
1695 if (m->hold_count) {
1696 vm_page_wakeup(m);
1697 goto next;
1701 * Calculate activity
1703 actcount = 0;
1704 if (m->flags & PG_REFERENCED) {
1705 vm_page_flag_clear(m, PG_REFERENCED);
1706 actcount += 1;
1708 actcount += pmap_ts_referenced(m);
1711 * Update act_count and move page to end of queue.
1713 if (actcount) {
1714 m->act_count += ACT_ADVANCE + actcount;
1715 if (m->act_count > ACT_MAX)
1716 m->act_count = ACT_MAX;
1717 vm_page_and_queue_spin_lock(m);
1718 if (m->queue - m->pc == PQ_ACTIVE) {
1719 TAILQ_REMOVE(
1720 &vm_page_queues[PQ_ACTIVE + q].pl,
1721 m, pageq);
1722 TAILQ_INSERT_TAIL(
1723 &vm_page_queues[PQ_ACTIVE + q].pl,
1724 m, pageq);
1726 vm_page_and_queue_spin_unlock(m);
1727 vm_page_wakeup(m);
1728 goto next;
1731 if (m->act_count == 0) {
1733 * We turn off page access, so that we have
1734 * more accurate RSS stats. We don't do this
1735 * in the normal page deactivation when the
1736 * system is loaded VM wise, because the
1737 * cost of the large number of page protect
1738 * operations would be higher than the value
1739 * of doing the operation.
1741 * We use the marker to save our place so
1742 * we can release the spin lock. both (m)
1743 * and (next) will be invalid.
1745 vm_page_protect(m, VM_PROT_NONE);
1746 vm_page_deactivate(m);
1747 } else {
1748 m->act_count -= min(m->act_count, ACT_DECLINE);
1749 vm_page_and_queue_spin_lock(m);
1750 if (m->queue - m->pc == PQ_ACTIVE) {
1751 TAILQ_REMOVE(
1752 &vm_page_queues[PQ_ACTIVE + q].pl,
1753 m, pageq);
1754 TAILQ_INSERT_TAIL(
1755 &vm_page_queues[PQ_ACTIVE + q].pl,
1756 m, pageq);
1758 vm_page_and_queue_spin_unlock(m);
1760 vm_page_wakeup(m);
1761 next:
1762 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1766 * Remove our local marker
1768 * Page queue still spin-locked.
1770 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1771 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1774 static int
1775 vm_pageout_free_page_calc(vm_size_t count)
1777 if (count < vmstats.v_page_count)
1778 return 0;
1780 * free_reserved needs to include enough for the largest swap pager
1781 * structures plus enough for any pv_entry structs when paging.
1783 * v_free_min normal allocations
1784 * v_free_reserved system allocations
1785 * v_pageout_free_min allocations by pageout daemon
1786 * v_interrupt_free_min low level allocations (e.g swap structures)
1788 if (vmstats.v_page_count > 1024)
1789 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1790 else
1791 vmstats.v_free_min = 64;
1794 * Make sure the vmmeter slop can't blow out our global minimums.
1796  * However, to accommodate weird configurations (vkernels with many
1797  * cpus and little memory, or artificially reduced hw.physmem), do
1798  * not allow v_free_min to exceed 1/20 of ram or the pageout daemon
1799 * will go out of control.
1801 if (vmstats.v_free_min < VMMETER_SLOP_COUNT * ncpus * 10)
1802 vmstats.v_free_min = VMMETER_SLOP_COUNT * ncpus * 10;
1803 if (vmstats.v_free_min > vmstats.v_page_count / 20)
1804 vmstats.v_free_min = vmstats.v_page_count / 20;
1806 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1807 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1808 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1809 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1811 return 1;
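/*
 * Worked example (4GB of RAM with 4KB pages, so v_page_count is
 * roughly 1048576; exact boot-time values will differ):
 *
 *	v_free_min           = 64 + (1048576 - 1024) / 200 = 5301
 *	v_free_reserved      = 5301 * 4 / 8 + 7            = 2657
 *	v_free_severe        = 5301 * 4 / 8                = 2650
 *	v_pageout_free_min   = 5301 * 2 / 8 + 7            = 1332
 *	v_interrupt_free_min = 5301 * 1 / 8 + 7            = 669
 *
 * i.e. about 20MB is kept free for normal allocations on such a
 * machine, subject to the VMMETER_SLOP and 1/20-of-RAM clamps above.
 */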
1816 * vm_pageout is the high level pageout daemon.
1818 * No requirements.
1820 static void
1821 vm_pageout_thread(void)
1823 int pass;
1824 int q;
1825 int q1iterator = 0;
1826 int q2iterator = 0;
1829 * Initialize some paging parameters.
1831 curthread->td_flags |= TDF_SYSTHREAD;
1833 vm_pageout_free_page_calc(vmstats.v_page_count);
1836 * v_free_target and v_cache_min control pageout hysteresis. Note
1837 * that these are more a measure of the VM cache queue hysteresis
1838  * than the VM free queue. Specifically, v_free_target is the
1839 * high water mark (free+cache pages).
1841 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1842 * low water mark, while v_free_min is the stop. v_cache_min must
1843 * be big enough to handle memory needs while the pageout daemon
1844 * is signalled and run to free more pages.
1846 if (vmstats.v_free_count > 6144)
1847 vmstats.v_free_target = 4 * vmstats.v_free_min +
1848 vmstats.v_free_reserved;
1849 else
1850 vmstats.v_free_target = 2 * vmstats.v_free_min +
1851 vmstats.v_free_reserved;
1854 * NOTE: With the new buffer cache b_act_count we want the default
1855 * inactive target to be a percentage of available memory.
1857 * The inactive target essentially determines the minimum
1858 * number of 'temporary' pages capable of caching one-time-use
1859 * files when the VM system is otherwise full of pages
1860 * belonging to multi-time-use files or active program data.
1862  * NOTE: The inactive target is aggressively pursued only if the
1863 * inactive queue becomes too small. If the inactive queue
1864 * is large enough to satisfy page movement to free+cache
1865 * then it is repopulated more slowly from the active queue.
1866 * This allows a general inactive_target default to be set.
1868 * There is an issue here for processes which sit mostly idle
1869 * 'overnight', such as sshd, tcsh, and X. Any movement from
1870 * the active queue will eventually cause such pages to
1871  * recycle, causing a lot of paging in the morning.
1872 * To reduce the incidence of this pages cycled out of the
1873 * buffer cache are moved directly to the inactive queue if
1874 * they were only used once or twice.
1876 * The vfs.vm_cycle_point sysctl can be used to adjust this.
1877 * Increasing the value (up to 64) increases the number of
1878 * buffer recyclements which go directly to the inactive queue.
1880 if (vmstats.v_free_count > 2048) {
1881 vmstats.v_cache_min = vmstats.v_free_target;
1882 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1883 } else {
1884 vmstats.v_cache_min = 0;
1885 vmstats.v_cache_max = 0;
1887 vmstats.v_inactive_target = vmstats.v_free_count / 4;
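/*
 * Continuing the illustrative 4GB example from
 * vm_pageout_free_page_calc() (v_free_min = 5301, v_free_reserved =
 * 2657, and assuming most pages are still free at boot):
 *
 *	v_free_target     = 4 * 5301 + 2657 = 23861   (~93MB)
 *	v_cache_min       = 23861
 *	v_cache_max       = 47722
 *	v_inactive_target = v_free_count / 4, i.e. roughly a quarter
 *			    of the pages that are free at this point.
 */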
1889 /* XXX does not really belong here */
1890 if (vm_page_max_wired == 0)
1891 vm_page_max_wired = vmstats.v_free_count / 3;
1893 if (vm_pageout_stats_max == 0)
1894 vm_pageout_stats_max = vmstats.v_free_target;
1897 * Set interval in seconds for stats scan.
1899 if (vm_pageout_stats_interval == 0)
1900 vm_pageout_stats_interval = 5;
1901 if (vm_pageout_full_stats_interval == 0)
1902 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1906 * Set maximum free per pass
1908 if (vm_pageout_stats_free_max == 0)
1909 vm_pageout_stats_free_max = 5;
1911 swap_pager_swap_init();
1912 pass = 0;
1915 * The pageout daemon is never done, so loop forever.
1917 while (TRUE) {
1918 int error;
1919 int avail_shortage;
1920 int inactive_shortage;
1921 int vnodes_skipped = 0;
1922 int recycle_count = 0;
1923 int tmp;
1926 * Wait for an action request. If we timeout check to
1927 * see if paging is needed (in case the normal wakeup
1928 * code raced us).
1930 if (vm_pages_needed == 0) {
1931 error = tsleep(&vm_pages_needed,
1932 0, "psleep",
1933 vm_pageout_stats_interval * hz);
1934 if (error &&
1935 vm_paging_needed() == 0 &&
1936 vm_pages_needed == 0) {
1937 for (q = 0; q < PQ_L2_SIZE; ++q)
1938 vm_pageout_page_stats(q);
1939 continue;
1941 vm_pages_needed = 1;
1944 mycpu->gd_cnt.v_pdwakeups++;
1947 * Scan for INACTIVE->CLEAN/PAGEOUT
1949 * This routine tries to avoid thrashing the system with
1950 * unnecessary activity.
1952 * Calculate our target for the number of free+cache pages we
1953 * want to get to. This is higher than the number that causes
1954 * allocations to stall (severe) in order to provide hysteresis,
1955 * and if we don't make it all the way but get to the minimum
1956 * we're happy. Goose it a bit if there are multiple requests
1957 * for memory.
1959 * Don't reduce avail_shortage inside the loop or the
1960 * PQAVERAGE() calculation will break.
1962 * NOTE! deficit is differentiated from avail_shortage as
1963 * REQUIRING at least (deficit) pages to be cleaned,
1964 * even if the page queues are in good shape. This
1965 * is used primarily for handling per-process
1966 * RLIMIT_RSS and may also see small values when
1967 * processes block due to low memory.
1969 vmstats_rollup();
1970 avail_shortage = vm_paging_target() + vm_pageout_deficit;
1971 vm_pageout_deficit = 0;
1973 if (avail_shortage > 0) {
1974 int delta = 0;
1976 for (q = 0; q < PQ_L2_SIZE; ++q) {
1977 delta += vm_pageout_scan_inactive(
1978 pass,
1979 (q + q1iterator) & PQ_L2_MASK,
1980 PQAVERAGE(avail_shortage),
1981 &vnodes_skipped);
1982 if (avail_shortage - delta <= 0)
1983 break;
1985 avail_shortage -= delta;
1986 q1iterator = q + 1;
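/*
 * PQAVERAGE() spreads the shortage roughly evenly over the PQ_L2_SIZE
 * queues, so each call above asks one queue to recover about
 * avail_shortage / PQ_L2_SIZE pages.  For example, a 6400-page
 * shortage over a hypothetical 64 queues asks each queue for roughly
 * 100 pages, and the loop stops early once the cumulative delta
 * covers the shortage.  q1iterator rotates the starting queue between
 * passes so the same queues are not always scanned first.
 */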
1990 * Figure out how many active pages we must deactivate. If
1991 * we were able to reach our target with just the inactive
1992 * scan above we limit the number of active pages we
1993 * deactivate to reduce unnecessary work.
1995 vmstats_rollup();
1996 inactive_shortage = vmstats.v_inactive_target -
1997 vmstats.v_inactive_count;
2000 * If we were unable to free sufficient inactive pages to
2001 * satisfy the free/cache queue requirements then simply
2002 * reaching the inactive target may not be good enough.
2003 * Try to deactivate pages in excess of the target based
2004 * on the shortfall.
2006 * However to prevent thrashing the VM system do not
2007 * deactivate more than an additional 1/10 the inactive
2008 * target's worth of active pages.
2010 if (avail_shortage > 0) {
2011 tmp = avail_shortage * 2;
2012 if (tmp > vmstats.v_inactive_target / 10)
2013 tmp = vmstats.v_inactive_target / 10;
2014 inactive_shortage += tmp;
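/*
 * Worked example, using purely illustrative numbers: with
 * v_inactive_target = 50000 and avail_shortage = 4000 the extra
 * deactivation is min(4000 * 2, 50000 / 10) = 5000 pages, so the
 * active scan below may deactivate at most 5000 pages beyond the
 * normal inactive target.
 */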
2018 * Only trigger a pmap cleanup on inactive shortage.
2020 if (inactive_shortage > 0) {
2021 pmap_collect();
2025 * Scan for ACTIVE->INACTIVE
2027 * Only trigger on inactive shortage. Triggering on
2028 * avail_shortage can starve the active queue with
2029 * unnecessary active->inactive transitions and destroy
2030 * performance.
2032 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
2033 int delta = 0;
2035 for (q = 0; q < PQ_L2_SIZE; ++q) {
2036 delta += vm_pageout_scan_active(
2037 pass,
2038 (q + q2iterator) & PQ_L2_MASK,
2039 PQAVERAGE(avail_shortage),
2040 PQAVERAGE(inactive_shortage),
2041 &recycle_count);
2042 if (inactive_shortage - delta <= 0 &&
2043 avail_shortage - delta <= 0) {
2044 break;
2047 inactive_shortage -= delta;
2048 avail_shortage -= delta;
2049 q2iterator = q + 1;
2053 * Scan for CACHE->FREE
2055 * Finally free enough cache pages to meet our free page
2056 * requirement and take more drastic measures if we are
2057 * still in trouble.
2059 vmstats_rollup();
2060 vm_pageout_scan_cache(avail_shortage, pass,
2061 vnodes_skipped, recycle_count);
2064 * Wait for more work.
2066 if (avail_shortage > 0) {
2067 ++pass;
2068 if (pass < 10 && vm_pages_needed > 1) {
2070 * Normal operation, additional processes
2071 * have already kicked us. Retry immediately
2072 * unless swap space is completely full, in
2073 * which case delay a bit.
2075 if (swap_pager_full) {
2076 tsleep(&vm_pages_needed, 0, "pdelay",
2077 hz / 5);
2078 } /* else immediate retry */
2079 } else if (pass < 10) {
2081 * Normal operation, fewer processes. Delay
2082 * a bit but allow wakeups.
2084 vm_pages_needed = 0;
2085 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2086 vm_pages_needed = 1;
2087 } else if (swap_pager_full == 0) {
2089 * We've taken too many passes, forced delay.
2091 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2092 } else {
2094 * Running out of memory, catastrophic
2095 * back-off to one-second intervals.
2097 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2099 } else if (vm_pages_needed) {
2101 * Interlocked wakeup of waiters (non-optional).
2103 * Similar to vm_page_free_wakeup() in vm_page.c,
2104 * wake any threads sleeping on v_free_count once the free count is back above the minimum or target.
2106 pass = 0;
2107 if (!vm_page_count_min(vm_page_free_hysteresis) ||
2108 !vm_page_count_target()) {
2109 vm_pages_needed = 0;
2110 wakeup(&vmstats.v_free_count);
2112 } else {
2113 pass = 0;
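/*
 * Minimal sketch of the delay ladder at the bottom of the main loop
 * above, rewritten as a pure function that returns the sleep length
 * in ticks (0 means retry immediately).  The function name and
 * parameters are invented for illustration; the thresholds mirror
 * the code above.
 */
static int
vm_pageout_example_delay(int pass, int more_requests, int swap_full, int hz_val)
{
        if (pass < 10 && more_requests)
                return (swap_full ? hz_val / 5 : 0); /* busy: retry at once */
        if (pass < 10)
                return (hz_val / 10);   /* normal operation, short delay */
        if (swap_full == 0)
                return (hz_val / 10);   /* too many passes, forced delay */
        return (hz_val);                /* out of memory: 1 second back-off */
}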
2118 static struct kproc_desc page_kp = {
2119 "pagedaemon",
2120 vm_pageout_thread,
2121 &pagethread
2123 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);
2127 * Called after allocating a page out of the cache or free queue
2128 * to possibly wake the pagedaemon up to replenish our supply.
2130 * We try to generate some hysteresis by waking the pagedaemon up
2131 * when our free+cache pages go below the free_min+cache_min level.
2132 * The pagedaemon tries to get the count back up to at least the
2133 * minimum, and through to the target level if possible.
2135 * If the pagedaemon is already active bump vm_pages_needed as a hint
2136 * that there are even more requests pending.
2138 * SMP races ok?
2139 * No requirements.
2141 void
2142 pagedaemon_wakeup(void)
2144 if (vm_paging_needed() && curthread != pagethread) {
2145 if (vm_pages_needed == 0) {
2146 vm_pages_needed = 1; /* SMP race ok */
2147 wakeup(&vm_pages_needed);
2148 } else if (vm_page_count_min(0)) {
2149 ++vm_pages_needed; /* SMP race ok */
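/*
 * Minimal sketch of the wakeup hysteresis described above: allocators
 * wake the daemon when free+cache drops below free_min + cache_min and
 * the daemon then works the count back up toward v_free_target.  The
 * helper name and parameters are invented for illustration.
 */
static int
example_should_wake_pagedaemon(int free_count, int cache_count,
                               int free_min, int cache_min)
{
        return (free_count + cache_count < free_min + cache_min);
}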
2154 #if !defined(NO_SWAPPING)
2157 * SMP races ok?
2158 * No requirements.
2160 static void
2161 vm_req_vmdaemon(void)
2163 static int lastrun = 0;
2165 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2166 wakeup(&vm_daemon_needed);
2167 lastrun = ticks;
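/*
 * Minimal sketch of the once-per-second gate used above, as a
 * standalone helper.  'now' is a tick counter that may wrap; the
 * second test re-arms the gate after a wrap, mirroring the
 * (ticks < lastrun) check.  The names are invented for illustration.
 */
static int
example_rate_limit_ok(int now, int *lastrun, int interval_ticks)
{
        if (now > *lastrun + interval_ticks || now < *lastrun) {
                *lastrun = now;
                return (1);     /* caller may deliver the wakeup */
        }
        return (0);             /* too soon: suppress the wakeup */
}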
2171 static int vm_daemon_callback(struct proc *p, void *data __unused);
2174 * No requirements.
2176 static void
2177 vm_daemon(void)
2179 int req_swapout;
2181 while (TRUE) {
2182 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2183 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2186 * forced swapouts
2188 if (req_swapout)
2189 swapout_procs(vm_pageout_req_swapout);
2192 * scan the processes; deactivate pages for any process that
2193 * exceeds its RSS rlimit or is swapped out
2195 allproc_scan(vm_daemon_callback, NULL);
2199 static int
2200 vm_daemon_callback(struct proc *p, void *data __unused)
2202 struct vmspace *vm;
2203 vm_pindex_t limit, size;
2206 * if this is a system process or if we have already
2207 * looked at this process, skip it.
2209 lwkt_gettoken(&p->p_token);
2211 if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2212 lwkt_reltoken(&p->p_token);
2213 return (0);
2217 * if the process is in a non-running type state,
2218 * don't touch it.
2220 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2221 lwkt_reltoken(&p->p_token);
2222 return (0);
2226 * get a limit
2228 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2229 p->p_rlimit[RLIMIT_RSS].rlim_max));
2232 * let processes that are swapped out really be
2233 * swapped out. Set the limit to nothing to get as
2234 * many pages out to swap as possible.
2236 if (p->p_flags & P_SWAPPEDOUT)
2237 limit = 0;
2239 vm = p->p_vmspace;
2240 vmspace_hold(vm);
2241 size = pmap_resident_tlnw_count(&vm->vm_pmap);
2242 if (limit >= 0 && size > 4096 &&
2243 size - 4096 >= limit && vm_pageout_memuse_mode >= 1) {
2244 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2246 vmspace_drop(vm);
2248 lwkt_reltoken(&p->p_token);
2250 return (0);
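/*
 * Minimal sketch of the RSS test above as a standalone helper, in
 * units of pages.  The byte-to-page division stands in for
 * OFF_TO_IDX() and the 4096-page slack mirrors the size > 4096 /
 * size - 4096 checks in vm_daemon_callback().  The names and the
 * page size constant are assumptions for illustration only.
 */
#define EXAMPLE_PAGE_SIZE       4096UL

static int
example_over_rss_limit(unsigned long rss_limit_bytes,
                       unsigned long resident_pages, int memuse_mode)
{
        unsigned long limit_pages = rss_limit_bytes / EXAMPLE_PAGE_SIZE;

        if (memuse_mode < 1)
                return (0);             /* enforcement disabled */
        if (resident_pages <= 4096)
                return (0);             /* small processes are left alone */
        return (resident_pages - 4096 >= limit_pages);
}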
2253 #endif