kernel - Fix longstanding VM long-duration stall issues (2)
[dragonfly.git] / sys / vm / vm_page2.h
blob354192d5fbca5cd8b183d5dd7bf30f27809c0d24
1 /*-
2 * Copyright (c) 1982, 1986, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
33 * @(#)vmmeter.h 8.2 (Berkeley) 7/10/94
34 * $FreeBSD: src/sys/sys/vmmeter.h,v 1.21.2.2 2002/10/10 19:28:21 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page2.h,v 1.3 2008/04/14 20:00:29 dillon Exp $
 */
38 #ifndef _VM_VM_PAGE2_H_
39 #define _VM_VM_PAGE2_H_
41 #ifndef _SYS_VMMETER_H_
42 #include <sys/vmmeter.h>
43 #endif
44 #ifndef _SYS_QUEUE_H_
45 #include <sys/queue.h>
46 #endif
47 #ifndef _VM_PAGE_H_
48 #include <vm/vm_page.h>
49 #endif
51 #ifdef _KERNEL
54 * Return TRUE if we are under our severe low-free-pages threshold
56 * This causes user processes to stall to avoid exhausting memory that
57 * the kernel might need.
59 * reserved < severe < minimum < target < paging_target
61 static __inline
62 int
63 vm_page_count_severe(void)
65 return (vmstats.v_free_severe >
66 vmstats.v_free_count + vmstats.v_cache_count ||
67 vmstats.v_free_reserved > vmstats.v_free_count);
71 * Return TRUE if we are under our minimum low-free-pages threshold.
72 * This activates the pageout demon. The pageout demon tries to
73 * reach the target but may stop once it satisfies the minimum.
75 * reserved < severe < minimum < target < paging_target
77 static __inline
78 int
79 vm_page_count_min(int donotcount)
81 return (vmstats.v_free_min + donotcount >
82 (vmstats.v_free_count + vmstats.v_cache_count) ||
83 vmstats.v_free_reserved > vmstats.v_free_count);
87 * Return TRUE if we are under our free page target. The pageout demon
88 * tries to reach the target but may stop once it gets past the min.
90 static __inline
91 int
92 vm_page_count_target(void)
94 return (vmstats.v_free_target >
95 (vmstats.v_free_count + vmstats.v_cache_count) ||
96 vmstats.v_free_reserved > vmstats.v_free_count);
100 * Return the number of pages the pageout daemon needs to move into the
101 * cache or free lists. A negative number means we have sufficient free
102 * pages.
104 * The target free+cache is greater than vm_page_count_target(). The
105 * frontend uses vm_page_count_target() while the backend continue freeing
106 * based on vm_paging_target().
108 * This function DOES NOT return TRUE or FALSE.
110 static __inline
112 vm_paging_target(void)
114 return (
115 (vmstats.v_free_target + vmstats.v_cache_min) -
116 (vmstats.v_free_count + vmstats.v_cache_count)
121 * Return TRUE if hysteresis dictates we should nominally wakeup the
122 * pageout daemon to start working on freeing up some memory. This
123 * routine should NOT be used to determine when to block on the VM system.
124 * We want to wakeup the pageout daemon before we might otherwise block.
126 * Paging begins when cache+free drops below cache_min + free_min.
128 static __inline
130 vm_paging_needed(void)
132 if (vmstats.v_free_min + vmstats.v_cache_min >
133 vmstats.v_free_count + vmstats.v_cache_count) {
134 return 1;
136 if (vmstats.v_free_min > vmstats.v_free_count)
137 return 1;
138 return 0;
141 static __inline
142 void
143 vm_page_event(vm_page_t m, vm_page_event_t event)
145 if (m->flags & PG_ACTIONLIST)
146 vm_page_event_internal(m, event);
149 static __inline
150 void
151 vm_page_init_action(vm_page_t m, vm_page_action_t action,
152 void (*func)(vm_page_t, vm_page_action_t), void *data)
154 action->m = m;
155 action->func = func;
156 action->data = data;
160 * Clear dirty bits in the VM page but truncate the
161 * end to a DEV_BSIZE'd boundary.
163 * Used when reading data in, typically via getpages.
164 * The partial device block at the end of the truncation
165 * range should not lose its dirty bit.
167 * NOTE: This function does not clear the pmap modified bit.
169 static __inline
170 void
171 vm_page_clear_dirty_end_nonincl(vm_page_t m, int base, int size)
173 size = (base + size) & ~DEV_BMASK;
174 if (base < size)
175 vm_page_clear_dirty(m, base, size - base);
179 * Clear dirty bits in the VM page but truncate the
180 * beginning to a DEV_BSIZE'd boundary.
182 * Used when truncating a buffer. The partial device
183 * block at the beginning of the truncation range
184 * should not lose its dirty bit.
186 * NOTE: This function does not clear the pmap modified bit.
188 static __inline
189 void
190 vm_page_clear_dirty_beg_nonincl(vm_page_t m, int base, int size)
192 size += base;
193 base = (base + DEV_BMASK) & ~DEV_BMASK;
194 if (base < size)
195 vm_page_clear_dirty(m, base, size - base);
198 #endif /* _KERNEL */
199 #endif /* _VM_VM_PAGE2_H_ */