kernel - Refactor struct vmstats and vm_zone
sys/vm/vm_pageout.c
1 /*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
36 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54 * Carnegie Mellon requests users of this software to return to
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
64 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
68 * The proverbial page-out daemon.
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
99 * System initialization
102 /* the kernel process "vm_pageout"*/
103 static int vm_pageout_page(vm_page_t m, int *max_launderp,
104 int *vnodes_skippedp, struct vnode **vpfailedp,
105 int pass, int vmflush_flags);
106 static int vm_pageout_clean_helper (vm_page_t, int);
107 static int vm_pageout_free_page_calc (vm_size_t count);
108 static void vm_pageout_page_free(vm_page_t m);
109 struct thread *pagethread;
111 #if !defined(NO_SWAPPING)
112 /* the kernel process "vm_daemon"*/
113 static void vm_daemon (void);
114 static struct thread *vmthread;
116 static struct kproc_desc vm_kp = {
117 "vmdaemon",
118 vm_daemon,
119 &vmthread
121 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
122 #endif
124 int vm_pages_needed = 0; /* Event on which pageout daemon sleeps */
125 int vm_pageout_deficit = 0; /* Estimated number of pages deficit */
126 int vm_pageout_pages_needed = 0;/* pageout daemon needs pages */
127 int vm_page_free_hysteresis = 16;
129 #if !defined(NO_SWAPPING)
130 static int vm_pageout_req_swapout;
131 static int vm_daemon_needed;
132 #endif
133 static int vm_max_launder = 4096;
134 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
135 static int vm_pageout_full_stats_interval = 0;
136 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
137 static int defer_swap_pageouts=0;
138 static int disable_swap_pageouts=0;
139 static u_int vm_anonmem_decline = ACT_DECLINE;
140 static u_int vm_filemem_decline = ACT_DECLINE * 2;
142 #if defined(NO_SWAPPING)
143 static int vm_swap_enabled=0;
144 static int vm_swap_idle_enabled=0;
145 #else
146 static int vm_swap_enabled=1;
147 static int vm_swap_idle_enabled=0;
148 #endif
149 int vm_pageout_memuse_mode=1; /* 0-disable, 1-passive, 2-active swp*/
151 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
152 CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
154 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
155 CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
157 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis,
158 CTLFLAG_RW, &vm_page_free_hysteresis, 0,
159 "Free more pages than the minimum required");
161 SYSCTL_INT(_vm, OID_AUTO, max_launder,
162 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
164 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
165 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
167 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
168 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
170 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
171 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
173 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
174 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
175 SYSCTL_INT(_vm, OID_AUTO, pageout_memuse_mode,
176 CTLFLAG_RW, &vm_pageout_memuse_mode, 0, "memoryuse resource mode");
178 #if defined(NO_SWAPPING)
179 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
180 CTLFLAG_RD, &vm_swap_enabled, 0, "");
181 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
182 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
183 #else
184 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
185 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
186 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
187 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
188 #endif
190 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
191 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
193 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
194 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
196 static int pageout_lock_miss;
197 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
198 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
200 int vm_page_max_wired; /* XXX max # of wired pages system-wide */
202 #if !defined(NO_SWAPPING)
203 static void vm_req_vmdaemon (void);
204 #endif
205 static void vm_pageout_page_stats(int q);
208 * Calculate approximately how many pages on each queue to try to
209 * clean. An exact calculation creates an edge condition when the
210 * queues are unbalanced so add significant slop. The queue scans
211 * will stop early when targets are reached and will start where they
212 * left off on the next pass.
214 * We need to be generous here because there are all sorts of loading
215 * conditions that can cause edge cases if we try to average over all queues.
216 * In particular, storage subsystems have become so fast that paging
217 * activity can become quite frantic. Eventually we will probably need
218 * two paging threads, one for dirty pages and one for clean, to deal
219 * with the bandwidth requirements.
221 * So what we do is calculate a value that can be satisfied nominally by
222 * only having to scan half the queues.
224 static __inline int
225 PQAVERAGE(int n)
227 int avg;
229 if (n >= 0) {
230 avg = ((n + (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) + 1);
231 } else {
232 avg = ((n - (PQ_L2_SIZE - 1)) / (PQ_L2_SIZE / 2) - 1);
234 return avg;
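/*
 * Worked example (assuming PQ_L2_SIZE is 512; the actual value may
 * differ): PQAVERAGE(1000) = (1000 + 511) / 256 + 1 = 6, so scanning
 * roughly half of the 512 queues (256 * 6 = 1536 pages) already
 * satisfies a 1000-page target, matching the "scan half the queues"
 * goal described above.
 */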
238 * vm_pageout_clean_helper:
240 * Clean the page and remove it from the laundry. The page must not be
241 * busy on-call.
243 * We set the busy bit to cause potential page faults on this page to
244 * block. Note the careful timing, however, the busy bit isn't set till
245 * late and we cannot do anything that will mess with the page.
247 static int
248 vm_pageout_clean_helper(vm_page_t m, int vmflush_flags)
250 vm_object_t object;
251 vm_page_t mc[BLIST_MAX_ALLOC];
252 int error;
253 int ib, is, page_base;
254 vm_pindex_t pindex = m->pindex;
256 object = m->object;
259 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
260 * with the new swapper, but we could have serious problems paging
261 * out other object types if there is insufficient memory.
263 * Unfortunately, checking free memory here is far too late, so the
264 * check has been moved up a procedural level.
268 * Don't mess with the page if it's busy, held, or special
270 * XXX do we really need to check hold_count here? hold_count
271 * isn't supposed to mess with vm_page ops except prevent the
272 * page from being reused.
274 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
275 vm_page_wakeup(m);
276 return 0;
280 * Place page in cluster. Align cluster for optimal swap space
281 * allocation (whether it is swap or not). This is typically ~16-32
282 * pages, which also tends to align the cluster to multiples of the
283 * filesystem block size if backed by a filesystem.
285 page_base = pindex % BLIST_MAX_ALLOC;
286 mc[page_base] = m;
287 ib = page_base - 1;
288 is = page_base + 1;
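/*
 * Illustrative example (assuming BLIST_MAX_ALLOC is 32; the actual
 * constant may differ): a page at pindex 70 yields page_base =
 * 70 % 32 = 6, so m is placed at mc[6] and the scans below fill mc[]
 * backwards from index 5 and forwards from index 7, covering object
 * indices 64..95 and keeping the flush aligned to a 32-page boundary.
 */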
291 * Scan object for clusterable pages.
293 * We can cluster ONLY if: ->> the page is NOT
294 * clean, wired, busy, held, or mapped into a
295 * buffer, and one of the following:
296 * 1) The page is inactive, or a seldom used
297 * active page.
298 * -or-
299 * 2) we force the issue.
301 * During heavy mmap/modification loads the pageout
302 * daemon can really fragment the underlying file
303 * due to flushing pages out of order and not trying
304 * to align the clusters (which leaves sporadic out-of-order
305 * holes). To solve this problem we do the reverse scan
306 * first and attempt to align our cluster, then do a
307 * forward scan if room remains.
309 vm_object_hold(object);
311 while (ib >= 0) {
312 vm_page_t p;
314 p = vm_page_lookup_busy_try(object, pindex - page_base + ib,
315 TRUE, &error);
316 if (error || p == NULL)
317 break;
318 if ((p->queue - p->pc) == PQ_CACHE ||
319 (p->flags & PG_UNMANAGED)) {
320 vm_page_wakeup(p);
321 break;
323 vm_page_test_dirty(p);
324 if (((p->dirty & p->valid) == 0 &&
325 (p->flags & PG_NEED_COMMIT) == 0) ||
326 p->wire_count != 0 || /* may be held by buf cache */
327 p->hold_count != 0) { /* may be undergoing I/O */
328 vm_page_wakeup(p);
329 break;
331 if (p->queue - p->pc != PQ_INACTIVE) {
332 if (p->queue - p->pc != PQ_ACTIVE ||
333 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
334 vm_page_wakeup(p);
335 break;
340 * Try to maintain page groupings in the cluster.
342 if (m->flags & PG_WINATCFLS)
343 vm_page_flag_set(p, PG_WINATCFLS);
344 else
345 vm_page_flag_clear(p, PG_WINATCFLS);
346 p->act_count = m->act_count;
348 mc[ib] = p;
349 --ib;
351 ++ib; /* fixup */
353 while (is < BLIST_MAX_ALLOC &&
354 pindex - page_base + is < object->size) {
355 vm_page_t p;
357 p = vm_page_lookup_busy_try(object, pindex - page_base + is,
358 TRUE, &error);
359 if (error || p == NULL)
360 break;
361 if (((p->queue - p->pc) == PQ_CACHE) ||
362 (p->flags & PG_UNMANAGED)) {
363 vm_page_wakeup(p);
364 break;
366 vm_page_test_dirty(p);
367 if (((p->dirty & p->valid) == 0 &&
368 (p->flags & PG_NEED_COMMIT) == 0) ||
369 p->wire_count != 0 || /* may be held by buf cache */
370 p->hold_count != 0) { /* may be undergoing I/O */
371 vm_page_wakeup(p);
372 break;
374 if (p->queue - p->pc != PQ_INACTIVE) {
375 if (p->queue - p->pc != PQ_ACTIVE ||
376 (vmflush_flags & VM_PAGER_ALLOW_ACTIVE) == 0) {
377 vm_page_wakeup(p);
378 break;
383 * Try to maintain page groupings in the cluster.
385 if (m->flags & PG_WINATCFLS)
386 vm_page_flag_set(p, PG_WINATCFLS);
387 else
388 vm_page_flag_clear(p, PG_WINATCFLS);
389 p->act_count = m->act_count;
391 mc[is] = p;
392 ++is;
395 vm_object_drop(object);
398 * we allow reads during pageouts...
400 return vm_pageout_flush(&mc[ib], is - ib, vmflush_flags);
404 * vm_pageout_flush() - launder the given pages
406 * The given pages are laundered. Note that we set up for the start of
407 * I/O (i.e. busy the page), mark it read-only, and bump the object
408 * reference count all in here rather than in the parent. If we want
409 * the parent to do more sophisticated things we may have to change
410 * the ordering.
412 * The pages in the array must be busied by the caller and will be
413 * unbusied by this function.
416 vm_pageout_flush(vm_page_t *mc, int count, int vmflush_flags)
418 vm_object_t object;
419 int pageout_status[count];
420 int numpagedout = 0;
421 int i;
424 * Initiate I/O. Bump the vm_page_t->busy counter.
426 for (i = 0; i < count; i++) {
427 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
428 ("vm_pageout_flush page %p index %d/%d: partially "
429 "invalid page", mc[i], i, count));
430 vm_page_io_start(mc[i]);
434 * We must make the pages read-only. This will also force the
435 * modified bit in the related pmaps to be cleared. The pager
436 * cannot clear the bit for us since the I/O completion code
437 * typically runs from an interrupt. The act of making the page
438 * read-only handles the case for us.
440 * Then we can unbusy the pages, we still hold a reference by virtue
441 * of our soft-busy.
443 for (i = 0; i < count; i++) {
444 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE)
445 vm_page_protect(mc[i], VM_PROT_NONE);
446 else
447 vm_page_protect(mc[i], VM_PROT_READ);
448 vm_page_wakeup(mc[i]);
451 object = mc[0]->object;
452 vm_object_pip_add(object, count);
454 vm_pager_put_pages(object, mc, count,
455 (vmflush_flags |
456 ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
457 pageout_status);
459 for (i = 0; i < count; i++) {
460 vm_page_t mt = mc[i];
462 switch (pageout_status[i]) {
463 case VM_PAGER_OK:
464 numpagedout++;
465 break;
466 case VM_PAGER_PEND:
467 numpagedout++;
468 break;
469 case VM_PAGER_BAD:
471 * Page outside of range of object. Right now we
472 * essentially lose the changes by pretending it
473 * worked.
475 vm_page_busy_wait(mt, FALSE, "pgbad");
476 pmap_clear_modify(mt);
477 vm_page_undirty(mt);
478 vm_page_wakeup(mt);
479 break;
480 case VM_PAGER_ERROR:
481 case VM_PAGER_FAIL:
483 * A page typically cannot be paged out when we
484 * have run out of swap. We leave the page
485 * marked inactive and will try to page it out
486 * again later.
488 * Starvation of the active page list is used to
489 * determine when the system is massively memory
490 * starved.
492 break;
493 case VM_PAGER_AGAIN:
494 break;
498 * If not PENDing this was a synchronous operation and we
499 * clean up after the I/O. If it is PENDing the mess is
500 * cleaned up asynchronously.
502 * Also nominally act on the caller's wishes if the caller
503 * wants to try to really clean (cache or free) the page.
505 * Also nominally deactivate the page if the system is
506 * memory-stressed.
508 if (pageout_status[i] != VM_PAGER_PEND) {
509 vm_page_busy_wait(mt, FALSE, "pgouw");
510 vm_page_io_finish(mt);
511 if (vmflush_flags & VM_PAGER_TRY_TO_CACHE) {
512 vm_page_try_to_cache(mt);
513 } else if (vm_page_count_severe()) {
514 vm_page_deactivate(mt);
515 vm_page_wakeup(mt);
516 } else {
517 vm_page_wakeup(mt);
519 vm_object_pip_wakeup(object);
522 return numpagedout;
525 #if !defined(NO_SWAPPING)
528 * Callback function, page busied for us. We must dispose of the busy
529 * condition. Any related pmap pages may be held but will not be locked.
531 static
533 vm_pageout_mdp_callback(struct pmap_pgscan_info *info, vm_offset_t va,
534 vm_page_t p)
536 int actcount;
537 int cleanit = 0;
540 * Basic tests - There should never be a marker, and we can stop
541 * once the RSS is below the required level.
543 KKASSERT((p->flags & PG_MARKER) == 0);
544 if (pmap_resident_tlnw_count(info->pmap) <= info->limit) {
545 vm_page_wakeup(p);
546 return(-1);
549 mycpu->gd_cnt.v_pdpages++;
551 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
552 vm_page_wakeup(p);
553 goto done;
556 ++info->actioncount;
559 * Check if the page has been referenced recently. If it has,
560 * activate it and skip.
562 actcount = pmap_ts_referenced(p);
563 if (actcount) {
564 vm_page_flag_set(p, PG_REFERENCED);
565 } else if (p->flags & PG_REFERENCED) {
566 actcount = 1;
569 if (actcount) {
570 if (p->queue - p->pc != PQ_ACTIVE) {
571 vm_page_and_queue_spin_lock(p);
572 if (p->queue - p->pc != PQ_ACTIVE) {
573 vm_page_and_queue_spin_unlock(p);
574 vm_page_activate(p);
575 } else {
576 vm_page_and_queue_spin_unlock(p);
578 } else {
579 p->act_count += actcount;
580 if (p->act_count > ACT_MAX)
581 p->act_count = ACT_MAX;
583 vm_page_flag_clear(p, PG_REFERENCED);
584 vm_page_wakeup(p);
585 goto done;
589 * Remove the page from this particular pmap. Once we do this, our
590 * pmap scans will not see it again (unless it gets faulted in), so
591 * we must actively dispose of or deal with the page.
593 pmap_remove_specific(info->pmap, p);
596 * If the page is not mapped to another process (i.e. as would be
597 * typical if this were a shared page from a library) then deactivate
598 * the page and clean it in two passes only.
600 * If the page hasn't been referenced since the last check, remove it
601 * from the pmap. If it is no longer mapped, deactivate it
602 * immediately, accelerating the normal decline.
604 * Once the page has been removed from the pmap the RSS code no
605 * longer tracks it so we have to make sure that it is staged for
606 * potential flush action.
608 if ((p->flags & PG_MAPPED) == 0) {
609 if (p->queue - p->pc == PQ_ACTIVE) {
610 vm_page_deactivate(p);
612 if (p->queue - p->pc == PQ_INACTIVE) {
613 cleanit = 1;
618 * Ok, try to fully clean the page and any nearby pages such that at
619 * least the requested page is freed or moved to the cache queue.
621 * We usually do this synchronously to allow us to get the page into
622 * the CACHE queue quickly, which will prevent memory exhaustion if
623 * a process with a memoryuse limit is running away. However, the
624 * sysadmin may desire to set vm.swap_user_async which relaxes this
625 * and improves write performance.
627 if (cleanit) {
628 int max_launder = 0x7FFF;
629 int vnodes_skipped = 0;
630 int vmflush_flags;
631 struct vnode *vpfailed = NULL;
633 info->offset = va;
635 if (vm_pageout_memuse_mode >= 2) {
636 vmflush_flags = VM_PAGER_TRY_TO_CACHE |
637 VM_PAGER_ALLOW_ACTIVE;
638 if (swap_user_async == 0)
639 vmflush_flags |= VM_PAGER_PUT_SYNC;
640 vm_page_flag_set(p, PG_WINATCFLS);
641 info->cleancount +=
642 vm_pageout_page(p, &max_launder,
643 &vnodes_skipped,
644 &vpfailed, 1, vmflush_flags);
645 } else {
646 vm_page_wakeup(p);
647 ++info->cleancount;
649 } else {
650 vm_page_wakeup(p);
652 done:
653 lwkt_user_yield();
654 return 0;
658 * Deactivate some number of pages in a map due to set RLIMIT_RSS limits,
659 * which is relatively difficult to do. We try to keep track of where we
660 * left off last time to reduce scan overhead.
662 * Called when vm_pageout_memuse_mode is >= 1.
664 void
665 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t limit)
667 vm_offset_t pgout_offset;
668 struct pmap_pgscan_info info;
669 int retries = 3;
671 pgout_offset = map->pgout_offset;
672 again:
673 #if 0
674 kprintf("%016jx ", pgout_offset);
675 #endif
676 if (pgout_offset < VM_MIN_USER_ADDRESS)
677 pgout_offset = VM_MIN_USER_ADDRESS;
678 if (pgout_offset >= VM_MAX_USER_ADDRESS)
679 pgout_offset = 0;
680 info.pmap = vm_map_pmap(map);
681 info.limit = limit;
682 info.beg_addr = pgout_offset;
683 info.end_addr = VM_MAX_USER_ADDRESS;
684 info.callback = vm_pageout_mdp_callback;
685 info.cleancount = 0;
686 info.actioncount = 0;
687 info.busycount = 0;
689 pmap_pgscan(&info);
690 pgout_offset = info.offset;
691 #if 0
692 kprintf("%016jx %08lx %08lx\n", pgout_offset,
693 info.cleancount, info.actioncount);
694 #endif
696 if (pgout_offset != VM_MAX_USER_ADDRESS &&
697 pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
698 goto again;
699 } else if (retries &&
700 pmap_resident_tlnw_count(vm_map_pmap(map)) > limit) {
701 --retries;
702 goto again;
704 map->pgout_offset = pgout_offset;
706 #endif
709 * Called when the pageout scan wants to free a page. We no longer
710 * try to cycle the vm_object here with a reference & dealloc, which can
711 * cause a non-trivial object collapse in a critical path.
713 * It is unclear why we cycled the ref_count in the past, perhaps to try
714 * to optimize shadow chain collapses but I don't quite see why it would
715 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages
716 * synchronously and not have to be kick-started.
718 static void
719 vm_pageout_page_free(vm_page_t m)
721 vm_page_protect(m, VM_PROT_NONE);
722 vm_page_free(m);
726 * vm_pageout_scan does the dirty work for the pageout daemon.
728 struct vm_pageout_scan_info {
729 struct proc *bigproc;
730 vm_offset_t bigsize;
733 static int vm_pageout_scan_callback(struct proc *p, void *data);
735 static int
736 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
737 int *vnodes_skipped)
739 vm_page_t m;
740 struct vm_page marker;
741 struct vnode *vpfailed; /* warning, allowed to be stale */
742 int maxscan;
743 int delta = 0;
744 int max_launder;
747 * Start scanning the inactive queue for pages we can move to the
748 * cache or free. The scan will stop when the target is reached or
749 * we have scanned the entire inactive queue. Note that m->act_count
750 * is not used to form decisions for the inactive queue, only for the
751 * active queue.
753 * max_launder limits the number of dirty pages we flush per scan.
754 * For most systems a smaller value (16 or 32) is more robust under
755 * extreme memory and disk pressure because any unnecessary writes
756 * to disk can result in extreme performance degradation. However,
757 * systems with excessive dirty pages (especially when MAP_NOSYNC is
758 * used) will die horribly with limited laundering. If the pageout
759 * daemon cannot clean enough pages in the first pass, we let it go
760 * all out in succeeding passes.
762 if ((max_launder = vm_max_launder) <= 1)
763 max_launder = 1;
764 if (pass)
765 max_launder = 10000;
768 * Initialize our marker
770 bzero(&marker, sizeof(marker));
771 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
772 marker.queue = PQ_INACTIVE + q;
773 marker.pc = q;
774 marker.wire_count = 1;
777 * Inactive queue scan.
779 * NOTE: The vm_page must be spinlocked before the queue to avoid
780 * deadlocks, so it is easiest to simply iterate the loop
781 * with the queue unlocked at the top.
783 vpfailed = NULL;
785 vm_page_queues_spin_lock(PQ_INACTIVE + q);
786 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
787 maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
790 * Queue locked at top of loop to avoid stack marker issues.
792 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
793 maxscan-- > 0 && avail_shortage - delta > 0)
795 int count;
797 KKASSERT(m->queue == PQ_INACTIVE + q);
798 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
799 &marker, pageq);
800 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
801 &marker, pageq);
802 mycpu->gd_cnt.v_pdpages++;
805 * Skip marker pages (atomic against other markers to avoid
806 * infinite hop-over scans).
808 if (m->flags & PG_MARKER)
809 continue;
812 * Try to busy the page. Don't mess with pages which are
813 * already busy or reorder them in the queue.
815 if (vm_page_busy_try(m, TRUE))
816 continue;
819 * Remaining operations run with the page busy and neither
820 * the page or the queue will be spin-locked.
822 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
823 KKASSERT(m->queue == PQ_INACTIVE + q);
825 count = vm_pageout_page(m, &max_launder, vnodes_skipped,
826 &vpfailed, pass, 0);
827 delta += count;
830 * Systems with a ton of memory can wind up with huge
831 * deactivation counts. Because the inactive scan is
832 * doing a lot of flushing, the combination can result
833 * in excessive paging even in situations where other
834 * unrelated threads free up sufficient VM.
836 * To deal with this we abort the nominal active->inactive
837 * scan before we hit the inactive target when free+cache
838 * levels have reached a reasonable target.
840 * When deciding to stop early we need to add some slop to
841 * the test and we need to return full completion to the caller
842 * to prevent the caller from thinking there is something
843 * wrong and issuing a low-memory+swap warning or pkill.
845 * A deficit forces paging regardless of the state of the
846 * VM page queues (used for RSS enforcement).
848 lwkt_yield();
849 vm_page_queues_spin_lock(PQ_INACTIVE + q);
850 if (vm_paging_target() < -vm_max_launder) {
852 * Stopping early, return full completion to caller.
854 if (delta < avail_shortage)
855 delta = avail_shortage;
856 break;
860 /* page queue still spin-locked */
861 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
862 vm_page_queues_spin_unlock(PQ_INACTIVE + q);
864 return (delta);
868 * Pageout the specified page, return the total number of pages paged out
869 * (this routine may cluster).
871 * The page must be busied and soft-busied by the caller and will be disposed
872 * of by this function.
874 static int
875 vm_pageout_page(vm_page_t m, int *max_launderp, int *vnodes_skippedp,
876 struct vnode **vpfailedp, int pass, int vmflush_flags)
878 vm_object_t object;
879 int actcount;
880 int count = 0;
883 * It is possible for a page to be busied ad-hoc (e.g. the
884 * pmap_collect() code) and wired and race against the
885 * allocation of a new page. vm_page_alloc() may be forced
886 * to deactivate the wired page in which case it winds up
887 * on the inactive queue and must be handled here. We
888 * correct the problem simply by unqueuing the page.
890 if (m->wire_count) {
891 vm_page_unqueue_nowakeup(m);
892 vm_page_wakeup(m);
893 kprintf("WARNING: pagedaemon: wired page on "
894 "inactive queue %p\n", m);
895 return 0;
899 * A held page may be undergoing I/O, so skip it.
901 if (m->hold_count) {
902 vm_page_and_queue_spin_lock(m);
903 if (m->queue - m->pc == PQ_INACTIVE) {
904 TAILQ_REMOVE(
905 &vm_page_queues[m->queue].pl, m, pageq);
906 TAILQ_INSERT_TAIL(
907 &vm_page_queues[m->queue].pl, m, pageq);
908 ++vm_swapcache_inactive_heuristic;
910 vm_page_and_queue_spin_unlock(m);
911 vm_page_wakeup(m);
912 return 0;
915 if (m->object == NULL || m->object->ref_count == 0) {
917 * If the object is not being used, we ignore previous
918 * references.
920 vm_page_flag_clear(m, PG_REFERENCED);
921 pmap_clear_reference(m);
922 /* fall through to end */
923 } else if (((m->flags & PG_REFERENCED) == 0) &&
924 (actcount = pmap_ts_referenced(m))) {
926 * Otherwise, if the page has been referenced while
927 * in the inactive queue, we bump the "activation
928 * count" upwards, making it less likely that the
929 * page will be added back to the inactive queue
930 * prematurely again. Here we check the page tables
931 * (or emulated bits, if any), given the upper level
932 * VM system not knowing anything about existing
933 * references.
935 vm_page_activate(m);
936 m->act_count += (actcount + ACT_ADVANCE);
937 vm_page_wakeup(m);
938 return 0;
942 * (m) is still busied.
944 * If the upper level VM system knows about any page
945 * references, we activate the page. We also set the
946 * "activation count" higher than normal so that we are less
947 * likely to place pages back onto the inactive queue again.
949 if ((m->flags & PG_REFERENCED) != 0) {
950 vm_page_flag_clear(m, PG_REFERENCED);
951 actcount = pmap_ts_referenced(m);
952 vm_page_activate(m);
953 m->act_count += (actcount + ACT_ADVANCE + 1);
954 vm_page_wakeup(m);
955 return 0;
959 * If the upper level VM system doesn't know anything about
960 * the page being dirty, we have to check for it again. As
961 * far as the VM code knows, any partially dirty pages are
962 * fully dirty.
964 * Pages marked PG_WRITEABLE may be mapped into the user
965 * address space of a process running on another cpu. A
966 * user process (without holding the MP lock) running on
967 * another cpu may be able to touch the page while we are
968 * trying to remove it. vm_page_cache() will handle this
969 * case for us.
971 if (m->dirty == 0) {
972 vm_page_test_dirty(m);
973 } else {
974 vm_page_dirty(m);
977 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
979 * Invalid pages can be easily freed
981 vm_pageout_page_free(m);
982 mycpu->gd_cnt.v_dfree++;
983 ++count;
984 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
986 * Clean pages can be placed onto the cache queue.
987 * This effectively frees them.
989 vm_page_cache(m);
990 ++count;
991 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
993 * Dirty pages need to be paged out, but flushing
994 * a page is extremely expensive versus freeing
995 * a clean page. Rather than artificially limiting
996 * the number of pages we can flush, we instead give
997 * dirty pages extra priority on the inactive queue
998 * by forcing them to be cycled through the queue
999 * twice before being flushed, after which the
1000 * (now clean) page will cycle through once more
1001 * before being freed. This significantly extends
1002 * the thrash point for a heavily loaded machine.
1004 vm_page_flag_set(m, PG_WINATCFLS);
1005 vm_page_and_queue_spin_lock(m);
1006 if (m->queue - m->pc == PQ_INACTIVE) {
1007 TAILQ_REMOVE(
1008 &vm_page_queues[m->queue].pl, m, pageq);
1009 TAILQ_INSERT_TAIL(
1010 &vm_page_queues[m->queue].pl, m, pageq);
1011 ++vm_swapcache_inactive_heuristic;
1013 vm_page_and_queue_spin_unlock(m);
1014 vm_page_wakeup(m);
1015 } else if (*max_launderp > 0) {
1017 * We always want to try to flush some dirty pages if
1018 * we encounter them, to keep the system stable.
1019 * Normally this number is small, but under extreme
1020 * pressure where there are insufficient clean pages
1021 * on the inactive queue, we may have to go all out.
1023 int swap_pageouts_ok;
1024 struct vnode *vp = NULL;
1026 swap_pageouts_ok = 0;
1027 object = m->object;
1028 if (object &&
1029 (object->type != OBJT_SWAP) &&
1030 (object->type != OBJT_DEFAULT)) {
1031 swap_pageouts_ok = 1;
1032 } else {
1033 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1034 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
1035 vm_page_count_min(0));
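/*
 * Summary of the checks above: pages backed by something other than
 * OBJT_SWAP/OBJT_DEFAULT (e.g. vnodes) are always eligible.  Swap or
 * default backed pages may be laundered when swap pageouts are
 * neither deferred nor disabled, or when they are merely deferred
 * but the system has dropped below its minimum free threshold
 * (vm_page_count_min()).
 */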
1039 * We don't bother paging objects that are "dead".
1040 * Those objects are in a "rundown" state.
1042 if (!swap_pageouts_ok ||
1043 (object == NULL) ||
1044 (object->flags & OBJ_DEAD)) {
1045 vm_page_and_queue_spin_lock(m);
1046 if (m->queue - m->pc == PQ_INACTIVE) {
1047 TAILQ_REMOVE(
1048 &vm_page_queues[m->queue].pl,
1049 m, pageq);
1050 TAILQ_INSERT_TAIL(
1051 &vm_page_queues[m->queue].pl,
1052 m, pageq);
1053 ++vm_swapcache_inactive_heuristic;
1055 vm_page_and_queue_spin_unlock(m);
1056 vm_page_wakeup(m);
1057 return 0;
1061 * (m) is still busied.
1063 * The object is already known NOT to be dead. It
1064 * is possible for the vget() to block the whole
1065 * pageout daemon, but the new low-memory handling
1066 * code should prevent it.
1068 * The previous code skipped locked vnodes and, worse,
1069 * reordered pages in the queue. This results in
1070 * completely non-deterministic operation because,
1071 * quite often, a vm_fault has initiated an I/O and
1072 * is holding a locked vnode at just the point where
1073 * the pageout daemon is woken up.
1075 * We can't wait forever for the vnode lock, we might
1076 * deadlock due to a vn_read() getting stuck in
1077 * vm_wait while holding this vnode. We skip the
1078 * vnode if we can't get it in a reasonable amount
1079 * of time.
1081 * vpfailed is used to (try to) avoid the case where
1082 * a large number of pages are associated with a
1083 * locked vnode, which could cause the pageout daemon
1084 * to stall for an excessive amount of time.
1086 if (object->type == OBJT_VNODE) {
1087 int flags;
1089 vp = object->handle;
1090 flags = LK_EXCLUSIVE;
1091 if (vp == *vpfailedp)
1092 flags |= LK_NOWAIT;
1093 else
1094 flags |= LK_TIMELOCK;
1095 vm_page_hold(m);
1096 vm_page_wakeup(m);
1099 * We have unbusied (m) temporarily so we can
1100 * acquire the vp lock without deadlocking.
1101 * (m) is held to prevent destruction.
1103 if (vget(vp, flags) != 0) {
1104 *vpfailedp = vp;
1105 ++pageout_lock_miss;
1106 if (object->flags & OBJ_MIGHTBEDIRTY)
1107 ++*vnodes_skippedp;
1108 vm_page_unhold(m);
1109 return 0;
1113 * The page might have been moved to another
1114 * queue during potential blocking in vget()
1115 * above. The page might have been freed and
1116 * reused for another vnode. The object might
1117 * have been reused for another vnode.
1119 if (m->queue - m->pc != PQ_INACTIVE ||
1120 m->object != object ||
1121 object->handle != vp) {
1122 if (object->flags & OBJ_MIGHTBEDIRTY)
1123 ++*vnodes_skippedp;
1124 vput(vp);
1125 vm_page_unhold(m);
1126 return 0;
1130 * The page may have been busied during the
1131 * blocking in vget(). We don't move the
1132 * page back onto the end of the queue so that
1133 * statistics are more correct if we don't.
1135 if (vm_page_busy_try(m, TRUE)) {
1136 vput(vp);
1137 vm_page_unhold(m);
1138 return 0;
1140 vm_page_unhold(m);
1143 * (m) is busied again
1145 * We own the busy bit and remove our hold
1146 * bit. If the page is still held it
1147 * might be undergoing I/O, so skip it.
1149 if (m->hold_count) {
1150 vm_page_and_queue_spin_lock(m);
1151 if (m->queue - m->pc == PQ_INACTIVE) {
1152 TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
1153 TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1154 ++vm_swapcache_inactive_heuristic;
1156 vm_page_and_queue_spin_unlock(m);
1157 if (object->flags & OBJ_MIGHTBEDIRTY)
1158 ++*vnodes_skippedp;
1159 vm_page_wakeup(m);
1160 vput(vp);
1161 return 0;
1163 /* (m) is left busied as we fall through */
1167 * page is busy and not held here.
1169 * If a page is dirty, then it is either being washed
1170 * (but not yet cleaned) or it is still in the
1171 * laundry. If it is still in the laundry, then we
1172 * start the cleaning operation.
1174 * decrement inactive_shortage on success to account
1175 * for the (future) cleaned page. Otherwise we
1176 * could wind up laundering or cleaning too many
1177 * pages.
1179 * NOTE: Cleaning the page here does not cause
1180 * force_deficit to be adjusted, because the
1181 * page is not being freed or moved to the
1182 * cache.
1184 count = vm_pageout_clean_helper(m, vmflush_flags);
1185 *max_launderp -= count;
1188 * Cleaning consumed the busy bit; the page is no longer accessible
1190 if (vp != NULL)
1191 vput(vp);
1192 } else {
1193 vm_page_wakeup(m);
1195 return count;
1198 static int
1199 vm_pageout_scan_active(int pass, int q,
1200 int avail_shortage, int inactive_shortage,
1201 int *recycle_countp)
1203 struct vm_page marker;
1204 vm_page_t m;
1205 int actcount;
1206 int delta = 0;
1207 int maxscan;
1210 * We want to move pages from the active queue to the inactive
1211 * queue to get the inactive queue to the inactive target. If
1212 * we still have a page shortage from above we try to directly free
1213 * clean pages instead of moving them.
1215 * If we do still have a shortage we keep track of the number of
1216 * pages we free or cache (recycle_count) as a measure of thrashing
1217 * between the active and inactive queues.
1219 * If we were able to completely satisfy the free+cache targets
1220 * from the inactive pool we limit the number of pages we move
1221 * from the active pool to the inactive pool to 2x the pages we
1222 * had removed from the inactive pool (with a minimum of 1/5 the
1223 * inactive target). If we were not able to completely satisfy
1224 * the free+cache targets we go for the whole target aggressively.
1226 * NOTE: Both variables can end up negative.
1227 * NOTE: We are still in a critical section.
1230 bzero(&marker, sizeof(marker));
1231 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1232 marker.queue = PQ_ACTIVE + q;
1233 marker.pc = q;
1234 marker.wire_count = 1;
1236 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1237 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1238 maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1241 * Queue locked at top of loop to avoid stack marker issues.
1243 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1244 maxscan-- > 0 && (avail_shortage - delta > 0 ||
1245 inactive_shortage > 0))
1247 KKASSERT(m->queue == PQ_ACTIVE + q);
1248 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1249 &marker, pageq);
1250 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1251 &marker, pageq);
1254 * Skip marker pages (atomic against other markers to avoid
1255 * infinite hop-over scans).
1257 if (m->flags & PG_MARKER)
1258 continue;
1261 * Try to busy the page. Don't mess with pages which are
1262 * already busy or reorder them in the queue.
1264 if (vm_page_busy_try(m, TRUE))
1265 continue;
1268 * Remaining operations run with the page busy and neither
1269 * the page or the queue will be spin-locked.
1271 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1272 KKASSERT(m->queue == PQ_ACTIVE + q);
1275 * Don't deactivate pages that are held, even if we can
1276 * busy them. (XXX why not?)
1278 if (m->hold_count != 0) {
1279 vm_page_and_queue_spin_lock(m);
1280 if (m->queue - m->pc == PQ_ACTIVE) {
1281 TAILQ_REMOVE(
1282 &vm_page_queues[PQ_ACTIVE + q].pl,
1283 m, pageq);
1284 TAILQ_INSERT_TAIL(
1285 &vm_page_queues[PQ_ACTIVE + q].pl,
1286 m, pageq);
1288 vm_page_and_queue_spin_unlock(m);
1289 vm_page_wakeup(m);
1290 goto next;
1294 * The count for pagedaemon pages is done after checking the
1295 * page for eligibility...
1297 mycpu->gd_cnt.v_pdpages++;
1300 * Check to see "how much" the page has been used and clear
1301 * the tracking access bits. If the object has no references
1302 * don't bother paying the expense.
1304 actcount = 0;
1305 if (m->object && m->object->ref_count != 0) {
1306 if (m->flags & PG_REFERENCED)
1307 ++actcount;
1308 actcount += pmap_ts_referenced(m);
1309 if (actcount) {
1310 m->act_count += ACT_ADVANCE + actcount;
1311 if (m->act_count > ACT_MAX)
1312 m->act_count = ACT_MAX;
1315 vm_page_flag_clear(m, PG_REFERENCED);
1318 * actcount is only valid if the object ref_count is non-zero.
1319 * If the page does not have an object, actcount will be zero.
1321 if (actcount && m->object->ref_count != 0) {
1322 vm_page_and_queue_spin_lock(m);
1323 if (m->queue - m->pc == PQ_ACTIVE) {
1324 TAILQ_REMOVE(
1325 &vm_page_queues[PQ_ACTIVE + q].pl,
1326 m, pageq);
1327 TAILQ_INSERT_TAIL(
1328 &vm_page_queues[PQ_ACTIVE + q].pl,
1329 m, pageq);
1331 vm_page_and_queue_spin_unlock(m);
1332 vm_page_wakeup(m);
1333 } else {
1334 switch(m->object->type) {
1335 case OBJT_DEFAULT:
1336 case OBJT_SWAP:
1337 m->act_count -= min(m->act_count,
1338 vm_anonmem_decline);
1339 break;
1340 default:
1341 m->act_count -= min(m->act_count,
1342 vm_filemem_decline);
1343 break;
1345 if (vm_pageout_algorithm ||
1346 (m->object == NULL) ||
1347 (m->object && (m->object->ref_count == 0)) ||
1348 m->act_count < pass + 1
1351 * Deactivate the page. If we had a
1352 * shortage from our inactive scan try to
1353 * free (cache) the page instead.
1355 * Don't just blindly cache the page if
1356 * we do not have a shortage from the
1357 * inactive scan, that could lead to
1358 * gigabytes being moved.
1360 --inactive_shortage;
1361 if (avail_shortage - delta > 0 ||
1362 (m->object && (m->object->ref_count == 0)))
1364 if (avail_shortage - delta > 0)
1365 ++*recycle_countp;
1366 vm_page_protect(m, VM_PROT_NONE);
1367 if (m->dirty == 0 &&
1368 (m->flags & PG_NEED_COMMIT) == 0 &&
1369 avail_shortage - delta > 0) {
1370 vm_page_cache(m);
1371 } else {
1372 vm_page_deactivate(m);
1373 vm_page_wakeup(m);
1375 } else {
1376 vm_page_deactivate(m);
1377 vm_page_wakeup(m);
1379 ++delta;
1380 } else {
1381 vm_page_and_queue_spin_lock(m);
1382 if (m->queue - m->pc == PQ_ACTIVE) {
1383 TAILQ_REMOVE(
1384 &vm_page_queues[PQ_ACTIVE + q].pl,
1385 m, pageq);
1386 TAILQ_INSERT_TAIL(
1387 &vm_page_queues[PQ_ACTIVE + q].pl,
1388 m, pageq);
1390 vm_page_and_queue_spin_unlock(m);
1391 vm_page_wakeup(m);
1394 next:
1395 lwkt_yield();
1396 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1400 * Clean out our local marker.
1402 * Page queue still spin-locked.
1404 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1405 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1407 return (delta);
1411 * The number of actually free pages can drop down to v_free_reserved,
1412 * so we try to build the free count back above v_free_min. Note that
1413 * vm_paging_needed() also returns TRUE if v_free_count is not at
1414 * least v_free_min so that is the minimum we must build the free
1415 * count to.
1417 * We use a slightly higher target to improve hysteresis,
1418 * ((v_free_target + v_free_min) / 2). Since v_free_target
1419 * is usually the same as v_cache_min this maintains about
1420 * half the pages in the free queue as are in the cache queue,
1421 * providing pretty good pipelining for pageout operation.
1423 * The system operator can manipulate vm.v_cache_min and
1424 * vm.v_free_target to tune the pageout daemon. Be sure
1425 * to keep vm.v_free_min < vm.v_free_target.
1427 * Note that the original paging target is to get at least
1428 * (free_min + cache_min) into (free + cache). The slightly
1429 * higher target will shift additional pages from cache to free
1430 * without affecting the original paging target in order to
1431 * maintain better hysteresis and not have the free count always
1432 * be dead-on v_free_min.
1434 * NOTE: we are still in a critical section.
1436 * Pages moved from PQ_CACHE to totally free are not counted in the
1437 * pages_freed counter.
1439 static void
1440 vm_pageout_scan_cache(int avail_shortage, int pass,
1441 int vnodes_skipped, int recycle_count)
1443 static int lastkillticks;
1444 struct vm_pageout_scan_info info;
1445 vm_page_t m;
1447 while (vmstats.v_free_count <
1448 (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1450 * This steals some code from vm/vm_page.c
1452 static int cache_rover = 0;
1454 m = vm_page_list_find(PQ_CACHE,
1455 cache_rover & PQ_L2_MASK, FALSE);
1456 if (m == NULL)
1457 break;
1458 /* page is returned removed from its queue and spinlocked */
1459 if (vm_page_busy_try(m, TRUE)) {
1460 vm_page_deactivate_locked(m);
1461 vm_page_spin_unlock(m);
1462 continue;
1464 vm_page_spin_unlock(m);
1465 pagedaemon_wakeup();
1466 lwkt_yield();
1469 * Remaining operations run with the page busy and neither
1470 * the page or the queue will be spin-locked.
1472 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1473 m->hold_count ||
1474 m->wire_count) {
1475 vm_page_deactivate(m);
1476 vm_page_wakeup(m);
1477 continue;
1479 KKASSERT((m->flags & PG_MAPPED) == 0);
1480 KKASSERT(m->dirty == 0);
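/*
 * Advance the rover by PQ_PRIME2, a prime stride, so successive
 * iterations tend to pull pages from different PQ_CACHE sub-queues
 * (colors) rather than repeatedly draining the same one.
 */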
1481 cache_rover += PQ_PRIME2;
1482 vm_pageout_page_free(m);
1483 mycpu->gd_cnt.v_dfree++;
1486 #if !defined(NO_SWAPPING)
1488 * Idle process swapout -- run once per second.
1490 if (vm_swap_idle_enabled) {
1491 static time_t lsec;
1492 if (time_uptime != lsec) {
1493 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_IDLE);
1494 vm_req_vmdaemon();
1495 lsec = time_uptime;
1498 #endif
1501 * If we didn't get enough free pages, and we have skipped a vnode
1502 * in a writeable object, wakeup the sync daemon. And kick swapout
1503 * if we did not get enough free pages.
1505 if (vm_paging_target() > 0) {
1506 if (vnodes_skipped && vm_page_count_min(0))
1507 speedup_syncer(NULL);
1508 #if !defined(NO_SWAPPING)
1509 if (vm_swap_enabled && vm_page_count_target()) {
1510 atomic_set_int(&vm_pageout_req_swapout, VM_SWAP_NORMAL);
1511 vm_req_vmdaemon();
1513 #endif
1517 * Handle catastrophic conditions. Under good conditions we should
1518 * be at the target, well beyond our minimum. If we could not even
1519 * reach our minimum the system is under heavy stress. But just being
1520 * under heavy stress does not trigger process killing.
1522 * We consider ourselves to have run out of memory if the swap pager
1523 * is full and avail_shortage is still positive. The secondary check
1524 * ensures that we do not kill processes if the instantaneous
1525 * availability is good, even if the pageout daemon pass says it
1526 * couldn't get to the target.
1528 if (swap_pager_almost_full &&
1529 pass > 0 &&
1530 (vm_page_count_min(recycle_count) || avail_shortage > 0)) {
1531 kprintf("Warning: system low on memory+swap "
1532 "shortage %d for %d ticks!\n",
1533 avail_shortage, ticks - swap_fail_ticks);
1535 if (swap_pager_full &&
1536 pass > 1 &&
1537 avail_shortage > 0 &&
1538 vm_paging_target() > 0 &&
1539 (unsigned int)(ticks - lastkillticks) >= hz) {
1541 * Kill something, maximum rate once per second to give
1542 * the process time to free up sufficient memory.
1544 lastkillticks = ticks;
1545 info.bigproc = NULL;
1546 info.bigsize = 0;
1547 allproc_scan(vm_pageout_scan_callback, &info);
1548 if (info.bigproc != NULL) {
1549 info.bigproc->p_nice = PRIO_MIN;
1550 info.bigproc->p_usched->resetpriority(
1551 FIRST_LWP_IN_PROC(info.bigproc));
1552 atomic_set_int(&info.bigproc->p_flags, P_LOWMEMKILL);
1553 killproc(info.bigproc, "out of swap space");
1554 wakeup(&vmstats.v_free_count);
1555 PRELE(info.bigproc);
1560 static int
1561 vm_pageout_scan_callback(struct proc *p, void *data)
1563 struct vm_pageout_scan_info *info = data;
1564 vm_offset_t size;
1567 * Never kill system processes or init. If we have configured swap
1568 * then try to avoid killing low-numbered pids.
1570 if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1571 ((p->p_pid < 48) && (vm_swap_size != 0))) {
1572 return (0);
1575 lwkt_gettoken(&p->p_token);
1578 * if the process is in a non-running type state,
1579 * don't touch it.
1581 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
1582 lwkt_reltoken(&p->p_token);
1583 return (0);
1587 * Get the approximate process size. Note that anonymous pages
1588 * with backing swap will be counted twice, but there should not
1589 * be too many such pages due to the stress the VM system is
1590 * under at this point.
1592 size = vmspace_anonymous_count(p->p_vmspace) +
1593 vmspace_swap_count(p->p_vmspace);
1596 * If this process is bigger than the biggest one
1597 * remember it.
1599 if (info->bigsize < size) {
1600 if (info->bigproc)
1601 PRELE(info->bigproc);
1602 PHOLD(p);
1603 info->bigproc = p;
1604 info->bigsize = size;
1606 lwkt_reltoken(&p->p_token);
1607 lwkt_yield();
1609 return(0);
1613 * This routine tries to maintain the pseudo LRU active queue,
1614 * so that during long periods of time where there is no paging,
1615 * some statistic accumulation still occurs. This code
1616 * helps the situation where paging just starts to occur.
1618 static void
1619 vm_pageout_page_stats(int q)
1621 static int fullintervalcount = 0;
1622 struct vm_page marker;
1623 vm_page_t m;
1624 int pcount, tpcount; /* Number of pages to check */
1625 int page_shortage;
1627 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1628 vmstats.v_free_min) -
1629 (vmstats.v_free_count + vmstats.v_inactive_count +
1630 vmstats.v_cache_count);
1632 if (page_shortage <= 0)
1633 return;
1635 pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1636 fullintervalcount += vm_pageout_stats_interval;
1637 if (fullintervalcount < vm_pageout_full_stats_interval) {
1638 tpcount = (vm_pageout_stats_max * pcount) /
1639 vmstats.v_page_count + 1;
1640 if (pcount > tpcount)
1641 pcount = tpcount;
1642 } else {
1643 fullintervalcount = 0;
1646 bzero(&marker, sizeof(marker));
1647 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1648 marker.queue = PQ_ACTIVE + q;
1649 marker.pc = q;
1650 marker.wire_count = 1;
1652 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1653 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1656 * Queue locked at top of loop to avoid stack marker issues.
1658 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1659 pcount-- > 0)
1661 int actcount;
1663 KKASSERT(m->queue == PQ_ACTIVE + q);
1664 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1665 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1666 &marker, pageq);
1669 * Skip marker pages (atomic against other markers to avoid
1670 * infinite hop-over scans).
1672 if (m->flags & PG_MARKER)
1673 continue;
1676 * Ignore pages we can't busy
1678 if (vm_page_busy_try(m, TRUE))
1679 continue;
1682 * Remaining operations run with the page busy and neither
1683 * the page or the queue will be spin-locked.
1685 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1686 KKASSERT(m->queue == PQ_ACTIVE + q);
1689 * We now have a safely busied page, the page and queue
1690 * spinlocks have been released.
1692 * Ignore held pages
1694 if (m->hold_count) {
1695 vm_page_wakeup(m);
1696 goto next;
1700 * Calculate activity
1702 actcount = 0;
1703 if (m->flags & PG_REFERENCED) {
1704 vm_page_flag_clear(m, PG_REFERENCED);
1705 actcount += 1;
1707 actcount += pmap_ts_referenced(m);
1710 * Update act_count and move page to end of queue.
1712 if (actcount) {
1713 m->act_count += ACT_ADVANCE + actcount;
1714 if (m->act_count > ACT_MAX)
1715 m->act_count = ACT_MAX;
1716 vm_page_and_queue_spin_lock(m);
1717 if (m->queue - m->pc == PQ_ACTIVE) {
1718 TAILQ_REMOVE(
1719 &vm_page_queues[PQ_ACTIVE + q].pl,
1720 m, pageq);
1721 TAILQ_INSERT_TAIL(
1722 &vm_page_queues[PQ_ACTIVE + q].pl,
1723 m, pageq);
1725 vm_page_and_queue_spin_unlock(m);
1726 vm_page_wakeup(m);
1727 goto next;
1730 if (m->act_count == 0) {
1732 * We turn off page access, so that we have
1733 * more accurate RSS stats. We don't do this
1734 * in the normal page deactivation when the
1735 * system is loaded VM wise, because the
1736 * cost of the large number of page protect
1737 * operations would be higher than the value
1738 * of doing the operation.
1740 * We use the marker to save our place so
1741 * we can release the spin lock. Both (m)
1742 * and (next) will be invalid.
1744 vm_page_protect(m, VM_PROT_NONE);
1745 vm_page_deactivate(m);
1746 } else {
1747 m->act_count -= min(m->act_count, ACT_DECLINE);
1748 vm_page_and_queue_spin_lock(m);
1749 if (m->queue - m->pc == PQ_ACTIVE) {
1750 TAILQ_REMOVE(
1751 &vm_page_queues[PQ_ACTIVE + q].pl,
1752 m, pageq);
1753 TAILQ_INSERT_TAIL(
1754 &vm_page_queues[PQ_ACTIVE + q].pl,
1755 m, pageq);
1757 vm_page_and_queue_spin_unlock(m);
1759 vm_page_wakeup(m);
1760 next:
1761 vm_page_queues_spin_lock(PQ_ACTIVE + q);
1765 * Remove our local marker
1767 * Page queue still spin-locked.
1769 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1770 vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1773 static int
1774 vm_pageout_free_page_calc(vm_size_t count)
1776 if (count < vmstats.v_page_count)
1777 return 0;
1779 * free_reserved needs to include enough for the largest swap pager
1780 * structures plus enough for any pv_entry structs when paging.
1782 * v_free_min normal allocations
1783 * v_free_reserved system allocations
1784 * v_pageout_free_min allocations by pageout daemon
1785 * v_interrupt_free_min low level allocations (e.g swap structures)
1787 if (vmstats.v_page_count > 1024)
1788 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1789 else
1790 vmstats.v_free_min = 64;
1793 * Make sure the vmmeter slop can't blow out our global minimums.
1795 if (vmstats.v_free_min < VMMETER_SLOP_COUNT * ncpus * 10)
1796 vmstats.v_free_min = VMMETER_SLOP_COUNT * ncpus * 10;
1798 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1799 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1800 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1801 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
1803 return 1;
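/*
 * Rough example of the thresholds computed above (assuming a machine
 * with 1048576 pages, i.e. ~4GB with 4K pages, and ignoring the
 * VMMETER_SLOP_COUNT clamp): v_free_min = 64 + 1047552 / 200 = 5301
 * pages (~20 MB), v_free_reserved = 2657, v_free_severe = 2650,
 * v_pageout_free_min = 1332, and v_interrupt_free_min = 669.
 */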
1808 * vm_pageout is the high level pageout daemon.
1810 * No requirements.
1812 static void
1813 vm_pageout_thread(void)
1815 int pass;
1816 int q;
1817 int q1iterator = 0;
1818 int q2iterator = 0;
1821 * Initialize some paging parameters.
1823 curthread->td_flags |= TDF_SYSTHREAD;
1825 vm_pageout_free_page_calc(vmstats.v_page_count);
1828 * v_free_target and v_cache_min control pageout hysteresis. Note
1829 * that these are more a measure of the VM cache queue hysteresis
1830 * than the VM free queue. Specifically, v_free_target is the
1831 * high water mark (free+cache pages).
1833 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1834 * low water mark, while v_free_min is the stop. v_cache_min must
1835 * be big enough to handle memory needs while the pageout daemon
1836 * is signalled and run to free more pages.
1838 if (vmstats.v_free_count > 6144)
1839 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
1840 else
1841 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
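/*
 * Continuing the ~4GB example above (v_free_min = 5301,
 * v_free_reserved = 2657), the large-memory branch would set
 * v_free_target = 4 * 5301 + 2657 = 23861 pages, roughly 93 MB of
 * free+cache headroom.
 */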
1844 * NOTE: With the new buffer cache b_act_count we want the default
1845 * inactive target to be a percentage of available memory.
1847 * The inactive target essentially determines the minimum
1848 * number of 'temporary' pages capable of caching one-time-use
1849 * files when the VM system is otherwise full of pages
1850 * belonging to multi-time-use files or active program data.
1852 * NOTE: The inactive target is aggressively pursued only if the
1853 * inactive queue becomes too small. If the inactive queue
1854 * is large enough to satisfy page movement to free+cache
1855 * then it is repopulated more slowly from the active queue.
1856 * This allows a general inactive_target default to be set.
1858 * There is an issue here for processes which sit mostly idle
1859 * 'overnight', such as sshd, tcsh, and X. Any movement from
1860 * the active queue will eventually cause such pages to
1861 * recycle, eventually causing a lot of paging in the morning.
1862 * To reduce the incidence of this pages cycled out of the
1863 * buffer cache are moved directly to the inactive queue if
1864 * they were only used once or twice.
1866 * The vfs.vm_cycle_point sysctl can be used to adjust this.
1867 * Increasing the value (up to 64) increases the number of
1868 * buffer recyclements which go directly to the inactive queue.
1870 if (vmstats.v_free_count > 2048) {
1871 vmstats.v_cache_min = vmstats.v_free_target;
1872 vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1873 } else {
1874 vmstats.v_cache_min = 0;
1875 vmstats.v_cache_max = 0;
1877 vmstats.v_inactive_target = vmstats.v_free_count / 4;
1879 /* XXX does not really belong here */
1880 if (vm_page_max_wired == 0)
1881 vm_page_max_wired = vmstats.v_free_count / 3;
1883 if (vm_pageout_stats_max == 0)
1884 vm_pageout_stats_max = vmstats.v_free_target;
1887 * Set interval in seconds for stats scan.
1889 if (vm_pageout_stats_interval == 0)
1890 vm_pageout_stats_interval = 5;
1891 if (vm_pageout_full_stats_interval == 0)
1892 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1896 * Set maximum free per pass
1898 if (vm_pageout_stats_free_max == 0)
1899 vm_pageout_stats_free_max = 5;
1901 swap_pager_swap_init();
1902 pass = 0;
1905 * The pageout daemon is never done, so loop forever.
1907 while (TRUE) {
1908 int error;
1909 int avail_shortage;
1910 int inactive_shortage;
1911 int vnodes_skipped = 0;
1912 int recycle_count = 0;
1913 int tmp;
1916 * Wait for an action request. If we time out, check to
1917 * see if paging is needed (in case the normal wakeup
1918 * code raced us).
1920 if (vm_pages_needed == 0) {
1921 error = tsleep(&vm_pages_needed,
1922 0, "psleep",
1923 vm_pageout_stats_interval * hz);
1924 if (error &&
1925 vm_paging_needed() == 0 &&
1926 vm_pages_needed == 0) {
1927 for (q = 0; q < PQ_L2_SIZE; ++q)
1928 vm_pageout_page_stats(q);
1929 continue;
1931 vm_pages_needed = 1;
1934 mycpu->gd_cnt.v_pdwakeups++;
1937 * Scan for INACTIVE->CLEAN/PAGEOUT
1939 * This routine tries to avoid thrashing the system with
1940 * unnecessary activity.
1942 * Calculate our target for the number of free+cache pages we
1943 * want to get to. This is higher than the number that causes
1944 * allocations to stall (severe) in order to provide hysteresis,
1945 * and if we don't make it all the way but get to the minimum
1946 * we're happy. Goose it a bit if there are multiple requests
1947 * for memory.
1949 * Don't reduce avail_shortage inside the loop or the
1950 * PQAVERAGE() calculation will break.
1952 * NOTE! deficit is differentiated from avail_shortage as
1953 * REQUIRING at least (deficit) pages to be cleaned,
1954 * even if the page queues are in good shape. This
1955 * is used primarily for handling per-process
1956 * RLIMIT_RSS and may also see small values when
1957 * processes block due to low memory.
1959 vmstats_rollup();
1960 avail_shortage = vm_paging_target() + vm_pageout_deficit;
1961 vm_pageout_deficit = 0;
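/*
 * Editor's note: avail_shortage is the distance of free+cache from the
 * paging target (per vm_paging_target()) plus any explicitly requested
 * deficit; the deficit is consumed (zeroed) once it has been folded in.
 */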
1963 if (avail_shortage > 0) {
1964 int delta = 0;
1966 for (q = 0; q < PQ_L2_SIZE; ++q) {
1967 delta += vm_pageout_scan_inactive(
1968 pass,
1969 (q + q1iterator) & PQ_L2_MASK,
1970 PQAVERAGE(avail_shortage),
1971 &vnodes_skipped);
1972 if (avail_shortage - delta <= 0)
1973 break;
1975 avail_shortage -= delta;
1976 q1iterator = q + 1;
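/*
 * Editor's note: the inactive scan spreads PQAVERAGE(avail_shortage)
 * across the PQ_L2_SIZE inactive queues and stops early once the
 * shortage is covered; q1iterator remembers where it stopped so the
 * next pass resumes with the following queue rather than always
 * starting at queue 0.
 */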
1980 * Figure out how many active pages we must deactivate. If
1981 * we were able to reach our target with just the inactive
1982 * scan above we limit the number of active pages we
1983 * deactivate to reduce unnecessary work.
1985 vmstats_rollup();
1986 inactive_shortage = vmstats.v_inactive_target -
1987 vmstats.v_inactive_count;
1990 * If we were unable to free sufficient inactive pages to
1991 * satisfy the free/cache queue requirements then simply
1992 * reaching the inactive target may not be good enough.
1993 * Try to deactivate pages in excess of the target based
1994 * on the shortfall.
1996 * However, to prevent thrashing the VM system, do not
1997 * deactivate more than an additional 1/10 of the inactive
1998 * target's worth of active pages.
2000 if (avail_shortage > 0) {
2001 tmp = avail_shortage * 2;
2002 if (tmp > vmstats.v_inactive_target / 10)
2003 tmp = vmstats.v_inactive_target / 10;
2004 inactive_shortage += tmp;
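/*
 * Editor's example: with a 10000-page inactive target the extra
 * deactivation is capped at 1000 pages per pass (1/10 of the target)
 * no matter how large avail_shortage is.
 */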
2008 * Only trigger a pmap cleanup on inactive shortage.
2010 if (inactive_shortage > 0) {
2011 pmap_collect();
2015 * Scan for ACTIVE->INACTIVE
2017 * Only trigger on inactive shortage. Triggering on
2018 * avail_shortage can starve the active queue with
2019 * unnecessary active->inactive transitions and destroy
2020 * performance.
2022 if (/*avail_shortage > 0 ||*/ inactive_shortage > 0) {
2023 int delta = 0;
2025 for (q = 0; q < PQ_L2_SIZE; ++q) {
2026 delta += vm_pageout_scan_active(
2027 pass,
2028 (q + q2iterator) & PQ_L2_MASK,
2029 PQAVERAGE(avail_shortage),
2030 PQAVERAGE(inactive_shortage),
2031 &recycle_count);
2032 if (inactive_shortage - delta <= 0 &&
2033 avail_shortage - delta <= 0) {
2034 break;
2037 inactive_shortage -= delta;
2038 avail_shortage -= delta;
2039 q2iterator = q + 1;
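/*
 * Editor's note: as with the inactive scan, q2iterator rotates the
 * starting queue among the PQ_L2_SIZE active queues between passes.
 */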
2043 * Scan for CACHE->FREE
2045 * Finally free enough cache pages to meet our free page
2046 * requirement and take more drastic measures if we are
2047 * still in trouble.
2049 vmstats_rollup();
2050 vm_pageout_scan_cache(avail_shortage, pass,
2051 vnodes_skipped, recycle_count);
2054 * Wait for more work.
2056 if (avail_shortage > 0) {
2057 ++pass;
2058 if (pass < 10 && vm_pages_needed > 1) {
2060 * Normal operation, additional processes
2061 * have already kicked us. Retry immediately
2062 * unless swap space is completely full in
2063 * which case delay a bit.
2065 if (swap_pager_full) {
2066 tsleep(&vm_pages_needed, 0, "pdelay",
2067 hz / 5);
2068 } /* else immediate retry */
2069 } else if (pass < 10) {
2071 * Normal operation, fewer processes. Delay
2072 * a bit but allow wakeups.
2074 vm_pages_needed = 0;
2075 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2076 vm_pages_needed = 1;
2077 } else if (swap_pager_full == 0) {
2079 * We've taken too many passes, forced delay.
2081 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2082 } else {
2084 * Running out of memory, catastrophic
2085 * back-off to one-second intervals.
2087 tsleep(&vm_pages_needed, 0, "pdelay", hz);
2089 } else if (vm_pages_needed) {
2091 * Interlocked wakeup of waiters (non-optional).
2093 * Similar to vm_page_free_wakeup() in vm_page.c,
2094 * wake
2096 pass = 0;
2097 if (!vm_page_count_min(vm_page_free_hysteresis) ||
2098 !vm_page_count_target()) {
2099 vm_pages_needed = 0;
2100 wakeup(&vmstats.v_free_count);
2102 } else {
2103 pass = 0;
2108 static struct kproc_desc page_kp = {
2109 "pagedaemon",
2110 vm_pageout_thread,
2111 &pagethread
2113 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp);
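/*
 * Editor's note: the SYSINIT above has kproc_start() create the kernel
 * thread described by page_kp ("pagedaemon", entry vm_pageout_thread,
 * handle pagethread) during the SI_SUB_KTHREAD_PAGE stage of boot.
 */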
2117 * Called after allocating a page out of the cache or free queue
2118 * to possibly wake the pagedaemon up to replenish our supply.
2120 * We try to generate some hysteresis by waking the pagedaemon up
2121 * when our free+cache pages go below the free_min+cache_min level.
2122 * The pagedaemon tries to get the count back up to at least the
2123 * minimum, and through to the target level if possible.
2125 * If the pagedaemon is already active, bump vm_pages_needed as a hint
2126 * that there are even more requests pending.
2128 * SMP races ok?
2129 * No requirements.
2131 void
2132 pagedaemon_wakeup(void)
2134 if (vm_paging_needed() && curthread != pagethread) {
2135 if (vm_pages_needed == 0) {
2136 vm_pages_needed = 1; /* SMP race ok */
2137 wakeup(&vm_pages_needed);
2138 } else if (vm_page_count_min(0)) {
2139 ++vm_pages_needed; /* SMP race ok */
2144 #if !defined(NO_SWAPPING)
2147 * SMP races ok?
2148 * No requirements.
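/*
 * Editor's note: vm_req_vmdaemon() below wakes the vm_daemon thread but
 * rate-limits wakeups to roughly one per second; the (ticks < lastrun)
 * test resynchronizes after the tick counter wraps.
 */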
2150 static void
2151 vm_req_vmdaemon(void)
2153 static int lastrun = 0;
2155 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2156 wakeup(&vm_daemon_needed);
2157 lastrun = ticks;
2161 static int vm_daemon_callback(struct proc *p, void *data __unused);
2164 * No requirements.
2166 static void
2167 vm_daemon(void)
2169 int req_swapout;
2171 while (TRUE) {
2172 tsleep(&vm_daemon_needed, 0, "psleep", 0);
2173 req_swapout = atomic_swap_int(&vm_pageout_req_swapout, 0);
2176 * forced swapouts
2178 if (req_swapout)
2179 swapout_procs(vm_pageout_req_swapout);
2182 * scan the processes for exceeding their rlimits or if
2183 * process is swapped out -- deactivate pages
2185 allproc_scan(vm_daemon_callback, NULL);
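/*
 * Editor's note: allproc_scan() invokes vm_daemon_callback() on every
 * process; the callback (below) deactivates pages for processes that
 * exceed their RLIMIT_RSS or have been swapped out.
 */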
2189 static int
2190 vm_daemon_callback(struct proc *p, void *data __unused)
2192 struct vmspace *vm;
2193 vm_pindex_t limit, size;
2196 * if this is a system process or if we have already
2197 * looked at this process, skip it.
2199 lwkt_gettoken(&p->p_token);
2201 if (p->p_flags & (P_SYSTEM | P_WEXIT)) {
2202 lwkt_reltoken(&p->p_token);
2203 return (0);
2207 * if the process is in a non-running type state,
2208 * don't touch it.
2210 if (p->p_stat != SACTIVE && p->p_stat != SSTOP && p->p_stat != SCORE) {
2211 lwkt_reltoken(&p->p_token);
2212 return (0);
2216 * get a limit
2218 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2219 p->p_rlimit[RLIMIT_RSS].rlim_max));
2222 * let processes that are swapped out really be
2223 * swapped out. Set the limit to nothing to get as
2224 * many pages out to swap as possible.
2226 if (p->p_flags & P_SWAPPEDOUT)
2227 limit = 0;
2229 vm = p->p_vmspace;
2230 vmspace_hold(vm);
2231 size = pmap_resident_tlnw_count(&vm->vm_pmap);
2232 if (limit >= 0 && size > 4096 &&
2233 size - 4096 >= limit && vm_pageout_memuse_mode >= 1) {
2234 vm_pageout_map_deactivate_pages(&vm->vm_map, limit);
2236 vmspace_drop(vm);
2238 lwkt_reltoken(&p->p_token);
2240 return (0);
2243 #endif